element_type | project_name | uuid | name | imports | structs | interfaces | file_location | code | global_vars | package | tags
---|---|---|---|---|---|---|---|---|---|---|---|
function
|
openshift/openshift-tests-private
|
9202d875-b4d0-4f5a-9112-37ae0ea33309
|
getPodName
|
['"context"']
|
['rsyslog']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func (r rsyslog) getPodName(oc *exutil.CLI) string {
pods, err := oc.AdminKubeClient().CoreV1().Pods(r.namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "component=" + r.serverName})
o.Expect(err).NotTo(o.HaveOccurred())
var names []string
for i := 0; i < len(pods.Items); i++ {
names = append(names, pods.Items[i].Name)
}
return names[0]
}
|
logging
| |||
function
|
openshift/openshift-tests-private
|
6b0fa744-f3be-4984-8158-dac8df4a33a5
|
checkData
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['rsyslog']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func (r rsyslog) checkData(oc *exutil.CLI, expect bool, filename string) {
cmd := "ls -l /var/log/clf/" + filename
if expect {
err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 60*time.Second, true, func(context.Context) (done bool, err error) {
stdout, err := e2eoutput.RunHostCmdWithRetries(r.namespace, r.getPodName(oc), cmd, 3*time.Second, 15*time.Second)
if err != nil {
if strings.Contains(err.Error(), "No such file or directory") {
return false, nil
}
return false, err
}
return strings.Contains(stdout, filename), nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The %s doesn't exist", filename))
} else {
err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 60*time.Second, true, func(context.Context) (done bool, err error) {
stdout, err := e2eoutput.RunHostCmdWithRetries(r.namespace, r.getPodName(oc), cmd, 3*time.Second, 15*time.Second)
if err != nil {
if strings.Contains(err.Error(), "No such file or directory") {
return true, nil
}
return false, err
}
return strings.Contains(stdout, "No such file or directory"), nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The %s exists", filename))
}
}
|
logging
| |||
function
|
openshift/openshift-tests-private
|
e514e778-ab6f-415d-a6fe-b67cb4c55666
|
createPipelineSecret
|
['"crypto/tls"']
|
['resource', 'fluentdServer']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func (f fluentdServer) createPipelineSecret(oc *exutil.CLI, keysPath string) {
secret := resource{"secret", f.secretName, f.loggingNS}
cmd := []string{"secret", "generic", secret.name, "-n", secret.namespace, "--from-file=ca-bundle.crt=" + keysPath + "/ca.crt"}
if f.clientAuth {
cmd = append(cmd, "--from-file=tls.key="+keysPath+"/client.key", "--from-file=tls.crt="+keysPath+"/client.crt")
}
if f.clientPrivateKeyPassphrase != "" {
cmd = append(cmd, "--from-literal=passphrase="+f.clientPrivateKeyPassphrase)
}
if f.sharedKey != "" {
cmd = append(cmd, "--from-literal=shared_key="+f.sharedKey)
}
err := oc.AsAdmin().WithoutNamespace().Run("create").Args(cmd...).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
secret.WaitForResourceToAppear(oc)
}
|
logging
| |||
function
|
openshift/openshift-tests-private
|
7679eb52-21f6-4c33-a5fb-5b2f2b263b15
|
deploy
|
['"crypto/tls"', '"fmt"', '"net/http"', '"os"', '"os/exec"', '"path/filepath"', '"cloud.google.com/go/logging"']
|
['resource', 'certsConf', 'fluentdServer']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func (f fluentdServer) deploy(oc *exutil.CLI) {
// create SA
sa := resource{"serviceaccount", f.serverName, f.namespace}
err := oc.WithoutNamespace().Run("create").Args("serviceaccount", sa.name, "-n", sa.namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
sa.WaitForResourceToAppear(oc)
//err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-scc-to-user", "privileged", fmt.Sprintf("system:serviceaccount:%s:%s", f.namespace, f.serverName), "-n", f.namespace).Execute()
//o.Expect(err).NotTo(o.HaveOccurred())
filePath := []string{"testdata", "logging", "external-log-stores", "fluentd"}
// create secrets if needed
if f.serverAuth {
o.Expect(f.secretName).NotTo(o.BeEmpty())
filePath = append(filePath, "secure")
// create a temporary directory
baseDir := exutil.FixturePath("testdata", "logging")
keysPath := filepath.Join(baseDir, "temp"+getRandomString())
defer exec.Command("rm", "-r", keysPath).Output()
err = os.MkdirAll(keysPath, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
//generate certs
cert := certsConf{f.serverName, f.namespace, f.clientPrivateKeyPassphrase}
cert.generateCerts(oc, keysPath)
//create pipelinesecret
f.createPipelineSecret(oc, keysPath)
//create secret for fluentd server
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", f.serverName, "-n", f.namespace, "--from-file=ca-bundle.crt="+keysPath+"/ca.crt", "--from-file=tls.key="+keysPath+"/server.key", "--from-file=tls.crt="+keysPath+"/server.crt", "--from-file=ca.key="+keysPath+"/ca.key").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
} else {
filePath = append(filePath, "insecure")
}
// create configmap/deployment/svc
cm := resource{"configmap", f.serverName, f.namespace}
//when the prefix is http-, the fluentd server uses the http input plugin.
cmFilePrefix := ""
if f.inPluginType == "http" {
cmFilePrefix = "http-"
}
var cmFileName string
if !f.serverAuth {
cmFileName = cmFilePrefix + "configmap.yaml"
} else {
if f.clientAuth {
if f.sharedKey != "" {
cmFileName = "cm-mtls-share.yaml"
} else {
cmFileName = cmFilePrefix + "cm-mtls.yaml"
}
} else {
if f.sharedKey != "" {
cmFileName = "cm-serverauth-share.yaml"
} else {
cmFileName = cmFilePrefix + "cm-serverauth.yaml"
}
}
}
cmFilePath := append(filePath, cmFileName)
cmFile := exutil.FixturePath(cmFilePath...)
cCmCmd := []string{"-f", cmFile, "-n", f.namespace, "-p", "NAMESPACE=" + f.namespace, "-p", "NAME=" + f.serverName}
if f.sharedKey != "" {
cCmCmd = append(cCmCmd, "-p", "SHARED_KEY="+f.sharedKey)
}
err = cm.applyFromTemplate(oc, cCmCmd...)
o.Expect(err).NotTo(o.HaveOccurred())
deploy := resource{"deployment", f.serverName, f.namespace}
deployFilePath := append(filePath, "deployment.yaml")
deployFile := exutil.FixturePath(deployFilePath...)
err = deploy.applyFromTemplate(oc, "-f", deployFile, "-n", f.namespace, "-p", "NAMESPACE="+f.namespace, "-p", "NAME="+f.serverName)
o.Expect(err).NotTo(o.HaveOccurred())
WaitForDeploymentPodsToBeReady(oc, f.namespace, f.serverName)
err = oc.AsAdmin().WithoutNamespace().Run("expose").Args("-n", f.namespace, "deployment", f.serverName, "--name="+f.serverName).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
|
logging
| |||
function
|
openshift/openshift-tests-private
|
6d196a65-488a-4eed-a85f-be241919d0a8
|
remove
|
['resource', 'fluentdServer']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func (f fluentdServer) remove(oc *exutil.CLI) {
resource{"serviceaccount", f.serverName, f.namespace}.clear(oc)
if f.serverAuth {
resource{"secret", f.serverName, f.namespace}.clear(oc)
resource{"secret", f.secretName, f.loggingNS}.clear(oc)
}
resource{"configmap", f.serverName, f.namespace}.clear(oc)
resource{"deployment", f.serverName, f.namespace}.clear(oc)
resource{"svc", f.serverName, f.namespace}.clear(oc)
}
|
logging
| ||||
function
|
openshift/openshift-tests-private
|
869c2495-a48e-4765-aead-c3ad327b14cd
|
getPodName
|
['"context"']
|
['fluentdServer']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func (f fluentdServer) getPodName(oc *exutil.CLI) string {
pods, err := oc.AdminKubeClient().CoreV1().Pods(f.namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "component=" + f.serverName})
o.Expect(err).NotTo(o.HaveOccurred())
var names []string
for i := 0; i < len(pods.Items); i++ {
names = append(names, pods.Items[i].Name)
}
return names[0]
}
|
logging
| |||
function
|
openshift/openshift-tests-private
|
634b12fc-ff97-4ca1-bfbf-908afe01fc64
|
checkData
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['fluentdServer']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func (f fluentdServer) checkData(oc *exutil.CLI, expect bool, filename string) {
cmd := "ls -l /fluentd/log/" + filename
if expect {
err := wait.PollUntilContextTimeout(context.Background(), 20*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
stdout, err := e2eoutput.RunHostCmdWithRetries(f.namespace, f.getPodName(oc), cmd, 3*time.Second, 15*time.Second)
if err != nil {
if strings.Contains(err.Error(), "No such file or directory") {
return false, nil
}
return false, err
}
return strings.Contains(stdout, filename), nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The %s doesn't exist", filename))
} else {
err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 60*time.Second, true, func(context.Context) (done bool, err error) {
stdout, err := e2eoutput.RunHostCmdWithRetries(f.namespace, f.getPodName(oc), cmd, 3*time.Second, 15*time.Second)
if err != nil {
if strings.Contains(err.Error(), "No such file or directory") {
return true, nil
}
return false, err
}
return strings.Contains(stdout, "No such file or directory"), nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The %s exists", filename))
}
}
|
logging
| |||
function
|
openshift/openshift-tests-private
|
5c2c072e-406d-4372-a3eb-f17179dea048
|
getInfrastructureName
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func getInfrastructureName(oc *exutil.CLI) string {
infrastructureName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure/cluster", "-o=jsonpath={.status.infrastructureName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
return infrastructureName
}
|
logging
| |||||
function
|
openshift/openshift-tests-private
|
ba6b5782-ff2a-492c-ae92-9e0c21767e7f
|
getDataFromKafkaConsumerPod
|
['"context"', '"encoding/json"', '"fmt"', '"regexp"', '"strings"', '"time"', '"cloud.google.com/go/logging"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
['kafka']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func getDataFromKafkaConsumerPod(oc *exutil.CLI, kafkaNS, consumerPod string) ([]LogEntity, error) {
e2e.Logf("get logs from kakfa consumerPod %s", consumerPod)
var logs []LogEntity
//wait up to 5 minutes for logs to appear in the consumer pod
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", kafkaNS, consumerPod, "--since=30s", "--tail=30").Output()
if err != nil {
e2e.Logf("error when oc logs consumer pod, continue")
return false, nil
}
for _, line := range strings.Split(strings.TrimSuffix(output, "\n"), "\n") {
//exclude those kafka-consumer logs, for example:
//[2024-11-09 07:25:47,953] WARN [Consumer clientId=consumer-console-consumer-99163-1, groupId=console-consumer-99163] Error while fetching metadata with correlation id 165
//: {topic-logging-app=UNKNOWN_TOPIC_OR_PARTITION} (org.apache.kafka.clients.NetworkClient)
r, _ := regexp.Compile(`^{"@timestamp":.*}`)
if r.MatchString(line) {
var log LogEntity
err = json.Unmarshal([]byte(line), &log)
if err != nil {
continue
}
logs = append(logs, log)
} else {
continue
}
}
if len(logs) > 0 {
return true, nil
} else {
e2e.Logf("can not find logs in consumerPod %s, continue", consumerPod)
return false, nil
}
})
if err != nil {
return logs, fmt.Errorf("can not find consumer logs in 3 minutes")
}
return logs, nil
}
|
logging
| |||
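getDataFromKafkaConsumerPod keeps only lines that look like JSON log records and silently drops the Kafka client chatter. Below is a minimal standalone sketch of that filtering step; the `logEntity` struct is a hypothetical stand-in for the suite's real `LogEntity` type, and the sample output is made up.

```go
package main

import (
	"encoding/json"
	"fmt"
	"regexp"
	"strings"
)

// logEntity is a hypothetical, trimmed-down stand-in for the suite's LogEntity type.
type logEntity struct {
	Timestamp string `json:"@timestamp"`
	Message   string `json:"message"`
}

func main() {
	// Mixed output as it might appear in `oc logs` of the consumer pod.
	output := `[2024-11-09 07:25:47,953] WARN [Consumer clientId=...] Error while fetching metadata
{"@timestamp":"2024-11-09T07:25:48Z","message":"hello from app"}`

	// Same pattern as getDataFromKafkaConsumerPod: keep only JSON log records.
	r := regexp.MustCompile(`^{"@timestamp":.*}`)
	var logs []logEntity
	for _, line := range strings.Split(strings.TrimSuffix(output, "\n"), "\n") {
		if !r.MatchString(line) {
			continue // drop kafka-consumer client noise
		}
		var log logEntity
		if err := json.Unmarshal([]byte(line), &log); err != nil {
			continue // skip lines that are not valid JSON, as the helper does
		}
		logs = append(logs, log)
	}
	fmt.Printf("kept %d record(s), first message: %q\n", len(logs), logs[0].Message)
}
```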
function
|
openshift/openshift-tests-private
|
493bd6c2-392f-425c-a5a2-584cc4cfad88
|
getDataFromKafkaByNamespace
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func getDataFromKafkaByNamespace(oc *exutil.CLI, kafkaNS, consumerPod, namespace string) ([]LogEntity, error) {
data, err := getDataFromKafkaConsumerPod(oc, kafkaNS, consumerPod)
if err != nil {
return nil, err
}
var logs []LogEntity
for _, log := range data {
if log.Kubernetes.NamespaceName == namespace {
logs = append(logs, log)
}
}
return logs, nil
}
|
logging
| |||||
function
|
openshift/openshift-tests-private
|
5e6bf3eb-a93f-43e8-91cd-cec501991ddd
|
deployZookeeper
|
['"path/filepath"', '"cloud.google.com/go/logging"']
|
['resource', 'kafka']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func (k kafka) deployZookeeper(oc *exutil.CLI) {
zookeeperFilePath := exutil.FixturePath("testdata", "logging", "external-log-stores", "kafka", "zookeeper")
//create zookeeper configmap/svc/StatefulSet
configTemplate := filepath.Join(zookeeperFilePath, "configmap.yaml")
if k.authtype == "plaintext-ssl" {
configTemplate = filepath.Join(zookeeperFilePath, "configmap-ssl.yaml")
}
err := resource{"configmap", k.zoosvcName, k.namespace}.applyFromTemplate(oc, "-n", k.namespace, "-f", configTemplate, "-p", "NAME="+k.zoosvcName, "NAMESPACE="+k.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
zoosvcFile := filepath.Join(zookeeperFilePath, "zookeeper-svc.yaml")
zoosvc := resource{"Service", k.zoosvcName, k.namespace}
err = zoosvc.applyFromTemplate(oc, "-n", k.namespace, "-f", zoosvcFile, "-p", "NAME="+k.zoosvcName, "-p", "NAMESPACE="+k.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
zoosfsFile := filepath.Join(zookeeperFilePath, "zookeeper-statefulset.yaml")
zoosfs := resource{"StatefulSet", k.zoosvcName, k.namespace}
err = zoosfs.applyFromTemplate(oc, "-n", k.namespace, "-f", zoosfsFile, "-p", "NAME="+k.zoosvcName, "-p", "NAMESPACE="+k.namespace, "-p", "SERVICENAME="+zoosvc.name, "-p", "CM_NAME="+k.zoosvcName)
o.Expect(err).NotTo(o.HaveOccurred())
waitForPodReadyWithLabel(oc, k.namespace, "app="+k.zoosvcName)
}
|
logging
| |||
function
|
openshift/openshift-tests-private
|
6df30641-cf33-416c-b7d3-8fc70d900274
|
deployKafka
|
['"crypto/tls"', '"encoding/json"', '"os"', '"os/exec"', '"path/filepath"', '"cloud.google.com/go/logging"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
['resource', 'kafka']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func (k kafka) deployKafka(oc *exutil.CLI) {
kafkaFilePath := exutil.FixturePath("testdata", "logging", "external-log-stores", "kafka")
kafkaConfigmapTemplate := filepath.Join(kafkaFilePath, k.authtype, "kafka-configmap.yaml")
consumerConfigmapTemplate := filepath.Join(kafkaFilePath, k.authtype, "consumer-configmap.yaml")
var keysPath string
if k.authtype == "sasl-ssl" || k.authtype == "plaintext-ssl" {
baseDir := exutil.FixturePath("testdata", "logging")
keysPath = filepath.Join(baseDir, "temp"+getRandomString())
defer exec.Command("rm", "-r", keysPath).Output()
err := os.MkdirAll(keysPath, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
generateCertsSH := filepath.Join(kafkaFilePath, "cert_generation.sh")
stdout, err := exec.Command("sh", generateCertsSH, keysPath, k.namespace).Output()
if err != nil {
e2e.Logf("error generating certs: %s", string(stdout))
e2e.Failf("error generating certs: %v", err)
}
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", "kafka-cluster-cert", "-n", k.namespace, "--from-file=ca_bundle.jks="+keysPath+"/ca/ca_bundle.jks", "--from-file=cluster.jks="+keysPath+"/cluster/cluster.jks").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
pipelineSecret := resource{"secret", k.pipelineSecret, k.loggingNS}
kafkaClientCert := resource{"secret", "kafka-client-cert", k.namespace}
//create kafka secrets and configmap
cmdPipeline := []string{"secret", "generic", pipelineSecret.name, "-n", pipelineSecret.namespace}
cmdClient := []string{"secret", "generic", kafkaClientCert.name, "-n", kafkaClientCert.namespace}
switch k.authtype {
case "sasl-plaintext":
{
cmdClient = append(cmdClient, "--from-literal=username=admin", "--from-literal=password=admin-secret")
cmdPipeline = append(cmdPipeline, "--from-literal=username=admin", "--from-literal=password=admin-secret")
if k.collectorType == "vector" {
cmdPipeline = append(cmdPipeline, "--from-literal=sasl.enable=True", "--from-literal=sasl.mechanisms=PLAIN")
}
}
case "sasl-ssl":
{
cmdClient = append(cmdClient, "--from-file=ca-bundle.jks="+keysPath+"/ca/ca_bundle.jks", "--from-file=ca-bundle.crt="+keysPath+"/ca/ca_bundle.crt", "--from-file=tls.crt="+keysPath+"/client/client.crt", "--from-file=tls.key="+keysPath+"/client/client.key", "--from-literal=username=admin", "--from-literal=password=admin-secret")
cmdPipeline = append(cmdPipeline, "--from-file=ca-bundle.crt="+keysPath+"/ca/ca_bundle.crt", "--from-literal=username=admin", "--from-literal=password=admin-secret")
switch k.collectorType {
case "fluentd":
{
cmdPipeline = append(cmdPipeline, "--from-literal=sasl_over_ssl=true")
}
case "vector":
{
cmdPipeline = append(cmdPipeline, "--from-literal=sasl.enable=True", "--from-literal=sasl.mechanisms=PLAIN", "--from-file=tls.crt="+keysPath+"/client/client.crt", "--from-file=tls.key="+keysPath+"/client/client.key")
}
}
}
case "plaintext-ssl":
{
cmdClient = append(cmdClient, "--from-file=ca-bundle.jks="+keysPath+"/ca/ca_bundle.jks", "--from-file=ca-bundle.crt="+keysPath+"/ca/ca_bundle.crt", "--from-file=tls.crt="+keysPath+"/client/client.crt", "--from-file=tls.key="+keysPath+"/client/client.key")
cmdPipeline = append(cmdPipeline, "--from-file=ca-bundle.crt="+keysPath+"/ca/ca_bundle.crt", "--from-file=tls.crt="+keysPath+"/client/client.crt", "--from-file=tls.key="+keysPath+"/client/client.key")
}
}
err := oc.AsAdmin().WithoutNamespace().Run("create").Args(cmdClient...).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
kafkaClientCert.WaitForResourceToAppear(oc)
err = oc.AsAdmin().WithoutNamespace().Run("create").Args(cmdPipeline...).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
pipelineSecret.WaitForResourceToAppear(oc)
consumerConfigmap := resource{"configmap", "kafka-client", k.namespace}
err = consumerConfigmap.applyFromTemplate(oc, "-n", k.namespace, "-f", consumerConfigmapTemplate, "-p", "NAME="+consumerConfigmap.name, "NAMESPACE="+consumerConfigmap.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
kafkaConfigmap := resource{"configmap", k.kafkasvcName, k.namespace}
err = kafkaConfigmap.applyFromTemplate(oc, "-n", k.namespace, "-f", kafkaConfigmapTemplate, "-p", "NAME="+kafkaConfigmap.name, "NAMESPACE="+kafkaConfigmap.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
//create ClusterRole and ClusterRoleBinding
rbacFile := filepath.Join(kafkaFilePath, "kafka-rbac.yaml")
output, err := oc.AsAdmin().WithoutNamespace().Run("process").Args("-n", k.namespace, "-f", rbacFile, "-p", "NAMESPACE="+k.namespace).OutputToFile(getRandomString() + ".json")
o.Expect(err).NotTo(o.HaveOccurred())
oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", output, "-n", k.namespace).Execute()
//create kafka svc
svcFile := filepath.Join(kafkaFilePath, "kafka-svc.yaml")
svc := resource{"Service", k.kafkasvcName, k.namespace}
err = svc.applyFromTemplate(oc, "-f", svcFile, "-n", svc.namespace, "-p", "NAME="+svc.name, "NAMESPACE="+svc.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
//create kafka StatefulSet
sfsFile := filepath.Join(kafkaFilePath, k.authtype, "kafka-statefulset.yaml")
sfs := resource{"StatefulSet", k.kafkasvcName, k.namespace}
err = sfs.applyFromTemplate(oc, "-f", sfsFile, "-n", k.namespace, "-p", "NAME="+sfs.name, "-p", "NAMESPACE="+sfs.namespace, "-p", "CM_NAME="+k.kafkasvcName)
o.Expect(err).NotTo(o.HaveOccurred())
waitForStatefulsetReady(oc, sfs.namespace, sfs.name)
//create kafka-consumer deployment
deployFile := filepath.Join(kafkaFilePath, k.authtype, "kafka-consumer-deployment.yaml")
deploy := resource{"deployment", "kafka-consumer-" + k.authtype, k.namespace}
err = deploy.applyFromTemplate(oc, "-f", deployFile, "-n", deploy.namespace, "-p", "NAMESPACE="+deploy.namespace, "NAME="+deploy.name, "CM_NAME=kafka-client")
o.Expect(err).NotTo(o.HaveOccurred())
WaitForDeploymentPodsToBeReady(oc, deploy.namespace, deploy.name)
}
|
logging
| |||
function
|
openshift/openshift-tests-private
|
2819a37c-fbad-45bc-a292-fda231a57463
|
removeZookeeper
|
['resource', 'kafka']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func (k kafka) removeZookeeper(oc *exutil.CLI) {
resource{"configmap", k.zoosvcName, k.namespace}.clear(oc)
resource{"svc", k.zoosvcName, k.namespace}.clear(oc)
resource{"statefulset", k.zoosvcName, k.namespace}.clear(oc)
}
|
logging
| ||||
function
|
openshift/openshift-tests-private
|
c583f591-49b4-43b1-a201-cb085a56b328
|
removeKafka
|
['resource', 'kafka']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func (k kafka) removeKafka(oc *exutil.CLI) {
resource{"secret", "kafka-client-cert", k.namespace}.clear(oc)
resource{"secret", "kafka-cluster-cert", k.namespace}.clear(oc)
resource{"secret", k.pipelineSecret, k.loggingNS}.clear(oc)
oc.AsAdmin().WithoutNamespace().Run("delete").Args("clusterrole/kafka-node-reader").Execute()
oc.AsAdmin().WithoutNamespace().Run("delete").Args("clusterrolebinding/kafka-node-reader").Execute()
resource{"configmap", k.kafkasvcName, k.namespace}.clear(oc)
resource{"svc", k.kafkasvcName, k.namespace}.clear(oc)
resource{"statefulset", k.kafkasvcName, k.namespace}.clear(oc)
resource{"configmap", "kafka-client", k.namespace}.clear(oc)
resource{"deployment", "kafka-consumer-" + k.authtype, k.namespace}.clear(oc)
}
|
logging
| ||||
function
|
openshift/openshift-tests-private
|
1a4399ab-04e0-43e8-860a-146be2161371
|
deploy
|
['"context"', '"encoding/base64"', '"fmt"', '"io"', '"path/filepath"', '"strings"', '"time"', '"cloud.google.com/go/logging"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
['SubscriptionObjects', 'CatalogSourceObjects', 'resource', 'kafka']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func (amqi *amqInstance) deploy(oc *exutil.CLI) {
e2e.Logf("deploy amq instance")
//initialize kafka vars
if amqi.name == "" {
amqi.name = "my-cluster"
}
if amqi.namespace == "" {
e2e.Failf("error, please define a namespace for amqstream instance")
}
if amqi.user == "" {
amqi.user = "my-user"
}
if amqi.topicPrefix == "" {
amqi.topicPrefix = "topic-logging"
}
if amqi.instanceType == "" {
amqi.instanceType = "kafka-sasl-cluster"
}
loggingBaseDir := exutil.FixturePath("testdata", "logging")
operatorDeployed := false
// Wait up to 3 minutes for the csv to appear
wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("csv", "-n", "openshift-operators").Output()
if err != nil {
return false, err
}
if strings.Contains(output, "amqstreams") {
operatorDeployed = true
return true, nil
}
return false, nil
})
if !operatorDeployed {
e2e.Logf("deploy amqstream operator")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("operatorhub/cluster", `-ojsonpath='{.status.sources[?(@.name=="redhat-operators")].disabled}'`).Output()
if err != nil {
g.Skip("Can not detect the catalog source/redhat-operators status")
}
if output == "true" {
g.Skip("catalog source/redhat-operators is disabled")
}
catsrc := CatalogSourceObjects{"stable", "redhat-operators", "openshift-marketplace"}
amqs := SubscriptionObjects{
OperatorName: "amq-streams-cluster-operator",
Namespace: amqi.namespace,
PackageName: "amq-streams",
Subscription: filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml"),
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "singlenamespace-og.yaml"),
CatalogSource: catsrc,
}
amqs.SubscribeOperator(oc)
if isFipsEnabled(oc) {
//disable FIPS_MODE due to "java.io.IOException: getPBEAlgorithmParameters failed: PBEWithHmacSHA256AndAES_256 AlgorithmParameters not available"
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("sub/"+amqs.PackageName, "-n", amqs.Namespace, "-p", "{\"spec\": {\"config\": {\"env\": [{\"name\": \"FIPS_MODE\", \"value\": \"disabled\"}]}}}", "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
}
// before creating kafka, check the existence of crd kafkas.kafka.strimzi.io
checkResource(oc, true, true, "kafka.strimzi.io", []string{"crd", "kafkas.kafka.strimzi.io", "-ojsonpath={.spec.group}"})
kafka := resource{"kafka", amqi.name, amqi.namespace}
kafkaTemplate := filepath.Join(loggingBaseDir, "external-log-stores", "kafka", "amqstreams", amqi.instanceType+".yaml")
kafka.applyFromTemplate(oc, "-n", kafka.namespace, "-f", kafkaTemplate, "-p", "NAME="+kafka.name)
// wait for kafka cluster to be ready
waitForPodReadyWithLabel(oc, kafka.namespace, "app.kubernetes.io/instance="+kafka.name)
if amqi.instanceType == "kafka-sasl-cluster" {
e2e.Logf("deploy kafka user")
kafkaUser := resource{"kafkauser", amqi.user, amqi.namespace}
kafkaUserTemplate := filepath.Join(loggingBaseDir, "external-log-stores", "kafka", "amqstreams", "kafka-sasl-user.yaml")
kafkaUser.applyFromTemplate(oc, "-n", kafkaUser.namespace, "-f", kafkaUserTemplate, "-p", "NAME="+amqi.user, "-p", "KAFKA_NAME="+amqi.name, "-p", "TOPIC_PREFIX="+amqi.topicPrefix)
// get user password from secret my-user
err := wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
secrets, err := oc.AdminKubeClient().CoreV1().Secrets(kafkaUser.namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "app.kubernetes.io/instance=" + kafkaUser.name})
if err != nil {
e2e.Logf("failed to list secret, continue")
return false, nil
}
count := len(secrets.Items)
if count == 0 {
e2e.Logf("canot not find the secret %s, continues", kafkaUser.name)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Can not find the kafka user Secret %s", amqi.user))
e2e.Logf("set kafka user password")
amqi.password, err = oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("get").Args("secret", amqi.user, "-n", amqi.namespace, "-o", "jsonpath={.data.password}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
temp, err := base64.StdEncoding.DecodeString(amqi.password)
o.Expect(err).NotTo(o.HaveOccurred())
amqi.password = string(temp)
// get external route of amqstream kafka
e2e.Logf("get kafka route")
amqi.route = getRouteAddress(oc, amqi.namespace, amqi.name+"-kafka-external-bootstrap")
amqi.route = amqi.route + ":443"
// get ca for route
e2e.Logf("get kafka routeCA")
amqi.routeCA, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", amqi.name+"-cluster-ca-cert", "-n", amqi.namespace, "-o", `jsonpath={.data.ca\.crt}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
temp, err = base64.StdEncoding.DecodeString(amqi.routeCA)
o.Expect(err).NotTo(o.HaveOccurred())
amqi.routeCA = string(temp)
}
// get internal service URL of amqstream kafka
amqi.service = amqi.name + "-kafka-bootstrap." + amqi.namespace + ".svc:9092"
e2e.Logf("amqstream deployed")
}
|
logging
| |||
function
|
openshift/openshift-tests-private
|
88da8b66-6f13-44fb-8547-3d1dabdd1f4e
|
destroy
|
['kafka']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func (amqi *amqInstance) destroy(oc *exutil.CLI) {
e2e.Logf("delete kakfa resources")
oc.AsAdmin().WithoutNamespace().Run("delete").Args("job", "--all", "-n", amqi.namespace).Execute()
oc.AsAdmin().WithoutNamespace().Run("delete").Args("kafkauser", "--all", "-n", amqi.namespace).Execute()
oc.AsAdmin().WithoutNamespace().Run("delete").Args("kafkatopic", "--all", "-n", amqi.namespace).Execute()
oc.AsAdmin().WithoutNamespace().Run("delete").Args("kafka", amqi.name, "-n", amqi.namespace).Execute()
}
|
logging
| ||||
function
|
openshift/openshift-tests-private
|
574f710c-0d55-4e9b-94a9-e284d738987c
|
createTopicAndConsumber
|
['"context"', '"encoding/base64"', '"path/filepath"', '"strings"', '"cloud.google.com/go/logging"']
|
['resource', 'kafka']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func (amqi amqInstance) createTopicAndConsumber(oc *exutil.CLI, topicName string) string {
e2e.Logf("create kakfa topic %s and consume pod", topicName)
if !strings.HasPrefix(topicName, amqi.topicPrefix) {
e2e.Failf("error, the topic %s must has prefix %s", topicName, amqi.topicPrefix)
}
var (
consumerPodName string
loggingBaseDir = exutil.FixturePath("testdata", "logging")
topicTemplate = filepath.Join(loggingBaseDir, "external-log-stores", "kafka", "amqstreams", "kafka-topic.yaml")
topic = resource{"Kafkatopic", topicName, amqi.namespace}
)
err := topic.applyFromTemplate(oc, "-n", topic.namespace, "-f", topicTemplate, "-p", "NAMESPACE="+topic.namespace, "-p", "NAME="+topic.name, "CLUSTER_NAME="+amqi.name)
o.Expect(err).NotTo(o.HaveOccurred())
if amqi.instanceType == "kafka-sasl-cluster" {
//create the consumer's sasl client properties configmap
truststorePassword, err := oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("get").Args("secret", amqi.name+"-cluster-ca-cert", "-n", amqi.namespace, "-o", `jsonpath={.data.ca\.password}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
temp, err := base64.StdEncoding.DecodeString(truststorePassword)
o.Expect(err).NotTo(o.HaveOccurred())
truststorePassword = string(temp)
consumerConfigTemplate := filepath.Join(loggingBaseDir, "external-log-stores", "kafka", "amqstreams", "kafka-sasl-consumers-config.yaml")
consumerConfig := resource{"configmap", "client-property-" + amqi.user, amqi.namespace}
err = consumerConfig.applyFromTemplate(oc.NotShowInfo(), "-n", consumerConfig.namespace, "-f", consumerConfigTemplate, "-p", "NAME="+consumerConfig.name, "-p", "USER="+amqi.user, "-p", "PASSWORD="+amqi.password, "-p", "TRUSTSTORE_PASSWORD="+truststorePassword, "-p", "KAFKA_NAME="+amqi.name)
o.Expect(err).NotTo(o.HaveOccurred())
//create consumer pod
consumerTemplate := filepath.Join(loggingBaseDir, "external-log-stores", "kafka", "amqstreams", "kafka-sasl-consumer-job.yaml")
consumer := resource{"job", topicName + "-consumer", amqi.namespace}
err = consumer.applyFromTemplate(oc, "-n", consumer.namespace, "-f", consumerTemplate, "-p", "NAME="+consumer.name, "-p", "CLUSTER_NAME="+amqi.name, "-p", "TOPIC_NAME="+topicName, "-p", "CLIENT_CONFIGMAP_NAME="+consumerConfig.name, "-p", "CA_SECRET_NAME="+amqi.name+"-cluster-ca-cert")
o.Expect(err).NotTo(o.HaveOccurred())
waitForPodReadyWithLabel(oc, amqi.namespace, "job-name="+consumer.name)
consumerPods, err := oc.AdminKubeClient().CoreV1().Pods(amqi.namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "job-name=" + topicName + "-consumer"})
o.Expect(err).NotTo(o.HaveOccurred())
consumerPodName = consumerPods.Items[0].Name
}
if amqi.instanceType == "kafka-no-auth-cluster" {
//create consumer pod
consumerTemplate := filepath.Join(loggingBaseDir, "external-log-stores", "kafka", "amqstreams", "kafka-no-auth-consumer-job.yaml")
consumer := resource{"job", topicName + "-consumer", amqi.namespace}
err = consumer.applyFromTemplate(oc, "-n", consumer.namespace, "-f", consumerTemplate, "-p", "NAME="+consumer.name, "-p", "CLUSTER_NAME="+amqi.name, "-p", "TOPIC_NAME="+topicName)
o.Expect(err).NotTo(o.HaveOccurred())
waitForPodReadyWithLabel(oc, amqi.namespace, "job-name="+consumer.name)
consumerPods, err := oc.AdminKubeClient().CoreV1().Pods(amqi.namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "job-name=" + topicName + "-consumer"})
o.Expect(err).NotTo(o.HaveOccurred())
consumerPodName = consumerPods.Items[0].Name
}
if consumerPodName == "" {
e2e.Logf("can not get comsumer pod for the topic %s", topicName)
} else {
e2e.Logf("found the comsumer pod %s ", consumerPodName)
}
return consumerPodName
}
|
logging
| |||
function
|
openshift/openshift-tests-private
|
ec681e46-644f-4ef0-9a14-ac53d256585e
|
deploy
|
['"os"']
|
['resource', 'eventRouter']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func (e eventRouter) deploy(oc *exutil.CLI, optionalParameters ...string) {
parameters := []string{"-f", e.template, "-l", "app=eventrouter", "-p", "NAME=" + e.name, "NAMESPACE=" + e.namespace}
if len(optionalParameters) > 0 {
parameters = append(parameters, optionalParameters...)
}
file, processErr := processTemplate(oc, parameters...)
defer os.Remove(file)
if processErr != nil {
e2e.Failf("error processing file: %v", processErr)
}
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", file, "-n", e.namespace).Execute()
if err != nil {
e2e.Failf("error deploying eventrouter: %v", err)
}
resource{"deployment", e.name, e.namespace}.WaitForResourceToAppear(oc)
WaitForDeploymentPodsToBeReady(oc, e.namespace, e.name)
}
|
logging
| |||
function
|
openshift/openshift-tests-private
|
1339e1c7-b373-4471-bd4c-24840f44c27e
|
delete
|
['resource', 'eventRouter']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func (e eventRouter) delete(oc *exutil.CLI) {
resources := []resource{{"deployment", e.name, e.namespace}, {"configmaps", e.name, e.namespace}, {"serviceaccounts", e.name, e.namespace}}
for _, r := range resources {
r.clear(oc)
}
oc.AsAdmin().WithoutNamespace().Run("delete").Args("clusterrole", e.name+"-reader").Execute()
oc.AsAdmin().WithoutNamespace().Run("delete").Args("clusterrolebindings", e.name+"-reader-binding").Execute()
}
|
logging
| ||||
function
|
openshift/openshift-tests-private
|
066fa31b-cfc4-4dee-9928-bf24bc8c4ade
|
createSecretForGCL
|
['"encoding/json"', '"os"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func createSecretForGCL(oc *exutil.CLI, name, namespace string) error {
// get gcp-credentials from env var GOOGLE_APPLICATION_CREDENTIALS
gcsCred := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")
return oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", name, "-n", namespace, "--from-file=google-application-credentials.json="+gcsCred).Execute()
}
|
logging
| ||||
function
|
openshift/openshift-tests-private
|
e041992c-cd8f-4091-9f39-f1c42611e765
|
getGCPProjectID
|
['"encoding/json"', '"fmt"', '"os"']
|
['googleApplicationCredentials']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func getGCPProjectID(oc *exutil.CLI) (string, error) {
platform := exutil.CheckPlatform(oc)
if platform == "gcp" {
return exutil.GetGcpProjectID(oc)
}
credentialFile, present := os.LookupEnv("GOOGLE_APPLICATION_CREDENTIALS")
if !present {
g.Skip("Skip for the platform is not GCP and there is no GCP credentials")
}
file, err := os.ReadFile(credentialFile)
if err != nil {
return "", fmt.Errorf("can't read google application credentials: %v", err)
}
var gac googleApplicationCredentials
err = json.Unmarshal(file, &gac)
return gac.ProjectID, err
}
|
logging
| |||
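getGCPProjectID falls back to reading the project ID out of the service-account key file referenced by GOOGLE_APPLICATION_CREDENTIALS. A self-contained sketch of that fallback path; the `credentials` struct here is a hypothetical stand-in for the suite's `googleApplicationCredentials` type, with "project_id" being the field name used in GCP service-account key files.

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// credentials is a hypothetical stand-in for the suite's googleApplicationCredentials type.
type credentials struct {
	ProjectID string `json:"project_id"`
}

func main() {
	path, ok := os.LookupEnv("GOOGLE_APPLICATION_CREDENTIALS")
	if !ok {
		fmt.Println("GOOGLE_APPLICATION_CREDENTIALS is not set")
		return
	}
	raw, err := os.ReadFile(path)
	if err != nil {
		panic(err)
	}
	var c credentials
	if err := json.Unmarshal(raw, &c); err != nil {
		panic(err)
	}
	fmt.Println("project:", c.ProjectID)
}
```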
function
|
openshift/openshift-tests-private
|
6e41354c-e2f2-4708-9f5f-ad28ab38ca86
|
listLogEntries
|
['"context"', '"fmt"', '"time"', '"cloud.google.com/go/logging"', '"cloud.google.com/go/logging/logadmin"', '"google.golang.org/api/iterator"']
|
['googleCloudLogging']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func (gcl googleCloudLogging) listLogEntries(queryString string) ([]*logging.Entry, error) {
ctx := context.Background()
adminClient, err := logadmin.NewClient(ctx, gcl.projectID)
if err != nil {
e2e.Logf("Failed to create logadmin client: %v", err)
return nil, err
}
defer adminClient.Close()
var entries []*logging.Entry
lastHour := time.Now().Add(-1 * time.Hour).Format(time.RFC3339)
filter := fmt.Sprintf(`logName = "projects/%s/logs/%s" AND timestamp > "%s"`, gcl.projectID, gcl.logName, lastHour)
if len(queryString) > 0 {
filter += queryString
}
iter := adminClient.Entries(ctx,
logadmin.Filter(filter),
// Get most recent entries first.
logadmin.NewestFirst(),
)
// Fetch the most recent 5 entries.
for len(entries) < 5 {
entry, err := iter.Next()
if err == iterator.Done {
return entries, nil
}
if err != nil {
return nil, err
}
entries = append(entries, entry)
}
return entries, nil
}
|
logging
| |||
function
|
openshift/openshift-tests-private
|
bb0f0077-7cc9-429c-882b-61cac1bc8554
|
getLogByType
|
['"cloud.google.com/go/logging"']
|
['googleCloudLogging']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func (gcl googleCloudLogging) getLogByType(logType string) ([]*logging.Entry, error) {
searchString := " AND jsonPayload.log_type = \"" + logType + "\""
return gcl.listLogEntries(searchString)
}
|
logging
| |||
function
|
openshift/openshift-tests-private
|
d14c05b2-602e-4cec-92b7-9e5ed062ca07
|
getLogByNamespace
|
['"cloud.google.com/go/logging"']
|
['googleCloudLogging']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func (gcl googleCloudLogging) getLogByNamespace(namespace string) ([]*logging.Entry, error) {
searchString := " AND jsonPayload.kubernetes.namespace_name = \"" + namespace + "\""
return gcl.listLogEntries(searchString)
}
|
logging
| |||
function
|
openshift/openshift-tests-private
|
c84f7ee6-0576-42cc-a4e8-7d9d06044e39
|
extractGoogleCloudLoggingLogs
|
['"encoding/json"', '"cloud.google.com/go/logging"', '"google.golang.org/protobuf/types/known/structpb"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func extractGoogleCloudLoggingLogs(gclLogs []*logging.Entry) ([]LogEntity, error) {
var (
logs []LogEntity
log LogEntity
)
for _, item := range gclLogs {
if value, ok := item.Payload.(*structpb.Struct); ok {
v, err := value.MarshalJSON()
if err != nil {
return nil, err
}
//e2e.Logf("\noriginal log:\n%s\n\n", string(v))
err = json.Unmarshal(v, &log)
if err != nil {
return nil, err
}
logs = append(logs, log)
}
}
return logs, nil
}
|
logging
| ||||
function
|
openshift/openshift-tests-private
|
53741910-8854-4c31-83bb-188588f971fe
|
removeLogs
|
['"context"', '"cloud.google.com/go/logging/logadmin"']
|
['googleCloudLogging']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func (gcl googleCloudLogging) removeLogs() error {
ctx := context.Background()
adminClient, err := logadmin.NewClient(ctx, gcl.projectID)
if err != nil {
e2e.Logf("Failed to create logadmin client: %v", err)
return err
}
defer adminClient.Close()
return adminClient.DeleteLog(ctx, gcl.logName)
}
|
logging
| |||
function
|
openshift/openshift-tests-private
|
696f5e0d-662a-46f6-be02-910c2a4266bb
|
waitForLogsAppearByType
|
['"context"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['googleCloudLogging']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func (gcl googleCloudLogging) waitForLogsAppearByType(logType string) error {
return wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
logs, err := gcl.getLogByType(logType)
if err != nil {
return false, err
}
return len(logs) > 0, nil
})
}
|
logging
| |||
function
|
openshift/openshift-tests-private
|
fc2043f2-1da7-4b98-bd81-c772234804ec
|
waitForLogsAppearByNamespace
|
['"context"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['googleCloudLogging']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func (gcl googleCloudLogging) waitForLogsAppearByNamespace(namespace string) error {
return wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
logs, err := gcl.getLogByNamespace(namespace)
if err != nil {
return false, err
}
return len(logs) > 0, nil
})
}
|
logging
| |||
function
|
openshift/openshift-tests-private
|
e8bb24cf-fbf0-4328-b2c3-0b8cdb79b1bf
|
getIndexImageTag
|
['"strconv"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func getIndexImageTag(oc *exutil.CLI) (string, error) {
version, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion/version", "-ojsonpath={.status.desired.version}").Output()
if err != nil {
return "", err
}
major := strings.Split(version, ".")[0]
minor := strings.Split(version, ".")[1]
newMinor, err := strconv.Atoi(minor)
if err != nil {
return "", err
}
return major + "." + strconv.Itoa(newMinor-1), nil
}
|
logging
| ||||
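getIndexImageTag derives the index-image tag from the cluster version by keeping the major version and stepping the minor version back by one. A self-contained sketch of that string manipulation, with an illustrative version string:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// previousMinorTag mirrors the arithmetic in getIndexImageTag: "4.16.8" -> "4.15".
func previousMinorTag(version string) (string, error) {
	parts := strings.Split(version, ".")
	major, minor := parts[0], parts[1]
	m, err := strconv.Atoi(minor)
	if err != nil {
		return "", err
	}
	return major + "." + strconv.Itoa(m-1), nil
}

func main() {
	tag, err := previousMinorTag("4.16.8")
	if err != nil {
		panic(err)
	}
	fmt.Println(tag) // 4.15
}
```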
function
|
openshift/openshift-tests-private
|
a98bfb71-aea4-4f57-97fb-829594f46b06
|
getExtLokiSecret
|
['"fmt"', '"os"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func getExtLokiSecret() (string, string, error) {
glokiUser := os.Getenv("GLOKIUSER")
glokiPwd := os.Getenv("GLOKIPWD")
if glokiUser == "" || glokiPwd == "" {
return "", "", fmt.Errorf("GLOKIUSER or GLOKIPWD environment variable is not set")
}
return glokiUser, glokiPwd, nil
}
|
logging
| ||||
function
|
openshift/openshift-tests-private
|
7ea6cfd4-30f5-431e-8f5d-d7ab525494fc
|
checkCiphers
|
['"context"', '"fmt"', '"strings"', '"time"', '"cloud.google.com/go/logging"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func checkCiphers(oc *exutil.CLI, tlsVer string, ciphers []string, server string, caFile string, cloNS string, timeInSec int) error {
delay := time.Duration(timeInSec) * time.Second
for _, cipher := range ciphers {
e2e.Logf("Testing %s...", cipher)
clPod, err := oc.AdminKubeClient().CoreV1().Pods(cloNS).List(context.Background(), metav1.ListOptions{LabelSelector: "name=cluster-logging-operator"})
if err != nil {
return fmt.Errorf("failed to get pods: %w", err)
}
cmd := fmt.Sprintf("openssl s_client -%s -cipher %s -CAfile %s -connect %s", tlsVer, cipher, caFile, server)
result, err := e2eoutput.RunHostCmdWithRetries(cloNS, clPod.Items[0].Name, cmd, 3*time.Second, 30*time.Second)
if err != nil {
return fmt.Errorf("failed to run command: %w", err)
}
if strings.Contains(string(result), ":error:") {
errorStr := strings.Split(string(result), ":")[5]
return fmt.Errorf("error: NOT SUPPORTED (%s)", errorStr)
} else if strings.Contains(string(result), fmt.Sprintf("Cipher is %s", cipher)) || strings.Contains(string(result), "Cipher :") {
e2e.Logf("SUPPORTED")
} else {
errorStr := string(result)
return fmt.Errorf("error: UNKNOWN RESPONSE %s", errorStr)
}
time.Sleep(delay)
}
return nil
}
|
logging
| ||||
function
|
openshift/openshift-tests-private
|
b92318a2-5d3f-441c-a640-60f5805766d1
|
checkTLSVer
|
['"context"', '"fmt"', '"strings"', '"time"', '"cloud.google.com/go/logging"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func checkTLSVer(oc *exutil.CLI, tlsVer string, server string, caFile string, cloNS string) error {
e2e.Logf("Testing TLS %s ", tlsVer)
clPod, err := oc.AdminKubeClient().CoreV1().Pods(cloNS).List(context.Background(), metav1.ListOptions{LabelSelector: "name=cluster-logging-operator"})
if err != nil {
return fmt.Errorf("failed to get pods: %w", err)
}
cmd := fmt.Sprintf("openssl s_client -%s -CAfile %s -connect %s", tlsVer, caFile, server)
result, err := e2eoutput.RunHostCmdWithRetries(cloNS, clPod.Items[0].Name, cmd, 3*time.Second, 30*time.Second)
if err != nil {
return fmt.Errorf("failed to run command: %w", err)
}
if strings.Contains(string(result), ":error:") {
errorStr := strings.Split(string(result), ":")[5]
return fmt.Errorf("error: NOT SUPPORTED (%s)", errorStr)
} else if strings.Contains(string(result), "Cipher is ") || strings.Contains(string(result), "Cipher :") {
e2e.Logf("SUPPORTED")
} else {
errorStr := string(result)
return fmt.Errorf("error: UNKNOWN RESPONSE %s", errorStr)
}
return nil
}
|
logging
| ||||
function
|
openshift/openshift-tests-private
|
9cfe818d-2f93-4964-84a8-4f469508f102
|
checkTLSProfile
|
['"io"', '"net/http"', 'metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func checkTLSProfile(oc *exutil.CLI, profile string, algo string, server string, caFile string, cloNS string, timeInSec int) bool {
var ciphers []string
var tlsVer string
if profile == "modern" {
e2e.Logf("Modern profile is currently not supported, please select from old, intermediate, custom")
return false
}
if isFipsEnabled(oc) {
switch profile {
case "old":
e2e.Logf("Checking old profile with TLS v1.3")
tlsVer = "tls1_3"
err := checkTLSVer(oc, tlsVer, server, caFile, cloNS)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Checking old profile with TLS v1.2")
if algo == "ECDSA" {
ciphers = []string{"ECDHE-ECDSA-AES128-GCM-SHA256", "ECDHE-ECDSA-AES256-GCM-SHA384", "ECDHE-ECDSA-CHACHA20-POLY1305", "ECDHE-ECDSA-AES128-SHA256", "ECDHE-ECDSA-AES128-SHA", "ECDHE-ECDSA-AES256-SHA384", "ECDHE-ECDSA-AES256-SHA"}
} else if algo == "RSA" {
ciphers = []string{"ECDHE-RSA-AES256-GCM-SHA384", "ECDHE-RSA-AES128-GCM-SHA256", "ECDHE-RSA-AES128-GCM-SHA256"}
}
tlsVer = "tls1_2"
err = checkCiphers(oc, tlsVer, ciphers, server, caFile, cloNS, timeInSec)
o.Expect(err).NotTo(o.HaveOccurred())
case "intermediate":
e2e.Logf("Setting alogorith to %s", algo)
e2e.Logf("Checking intermediate profile with TLS v1.3")
tlsVer = "tls1_3"
err := checkTLSVer(oc, tlsVer, server, caFile, cloNS)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Checking intermediate ciphers with TLS v1.3")
// as openssl-3.0.7-24.el9 in CLO pod failed as below, no such issue in openssl-3.0.9-2.fc38. use TLS 1.3 to test TLS 1.2 here.
// openssl s_client -tls1_2 -cipher ECDHE-RSA-AES128-GCM-SHA256 -CAfile /run/secrets/kubernetes.io/serviceaccount/service-ca.crt -connect lokistack-sample-gateway-http:8081
// 20B4A391FFFF0000:error:1C8000E9:Provider routines:kdf_tls1_prf_derive:ems not enabled:providers/implementations/kdfs/tls1_prf.c:200:
// 20B4A391FFFF0000:error:0A08010C:SSL routines:tls1_PRF:unsupported:ssl/t1_enc.c:83:
tlsVer = "tls1_3"
if algo == "ECDSA" {
ciphers = []string{"ECDHE-ECDSA-AES128-GCM-SHA256", "ECDHE-ECDSA-AES256-GCM-SHA384"}
} else if algo == "RSA" {
ciphers = []string{"ECDHE-RSA-AES128-GCM-SHA256", "ECDHE-RSA-AES256-GCM-SHA384"}
}
err = checkCiphers(oc, tlsVer, ciphers, server, caFile, cloNS, timeInSec)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Checking intermediate profile with TLS v1.1")
tlsVer = "tls1_1"
err = checkCiphers(oc, tlsVer, ciphers, server, caFile, cloNS, timeInSec)
o.Expect(err).To(o.HaveOccurred())
case "custom":
e2e.Logf("Setting alogorith to %s", algo)
e2e.Logf("Checking custom profile with TLS v1.3")
tlsVer = "tls1_3"
err := checkTLSVer(oc, tlsVer, server, caFile, cloNS)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Checking custom profile ciphers with TLS v1.3")
// as openssl-3.0.7-24.el9 in CLO pod failed as below, no such issue in openssl-3.0.9-2.fc38. use TLS 1.3 to test TLS 1.2 here.
// openssl s_client -tls1_2 -cipher ECDHE-RSA-AES128-GCM-SHA256 -CAfile /run/secrets/kubernetes.io/serviceaccount/service-ca.crt -connect lokistack-sample-gateway-http:8081
// 20B4A391FFFF0000:error:1C8000E9:Provider routines:kdf_tls1_prf_derive:ems not enabled:providers/implementations/kdfs/tls1_prf.c:200:
// 20B4A391FFFF0000:error:0A08010C:SSL routines:tls1_PRF:unsupported:ssl/t1_enc.c:83:
tlsVer = "tls1_3"
if algo == "ECDSA" {
ciphers = []string{"ECDHE-ECDSA-CHACHA20-POLY1305", "ECDHE-ECDSA-AES128-GCM-SHA256"}
} else if algo == "RSA" {
ciphers = []string{"ECDHE-RSA-AES128-GCM-SHA256"}
}
err = checkCiphers(oc, tlsVer, ciphers, server, caFile, cloNS, timeInSec)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Checking ciphers on in custom profile with TLS v1.3")
tlsVer = "tls1_3"
if algo == "ECDSA" {
ciphers = []string{"TLS_AES_128_GCM_SHA256"}
} else if algo == "RSA" {
ciphers = []string{"TLS_AES_128_GCM_SHA256"}
}
err = checkCiphers(oc, tlsVer, ciphers, server, caFile, cloNS, timeInSec)
o.Expect(err).To(o.HaveOccurred())
}
} else {
switch profile {
case "old":
e2e.Logf("Checking old profile with TLS v1.3")
tlsVer = "tls1_3"
err := checkTLSVer(oc, tlsVer, server, caFile, cloNS)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Checking old profile with TLS v1.2")
if algo == "ECDSA" {
ciphers = []string{"ECDHE-ECDSA-AES128-GCM-SHA256", "ECDHE-ECDSA-AES256-GCM-SHA384", "ECDHE-ECDSA-CHACHA20-POLY1305", "ECDHE-ECDSA-AES128-SHA256", "ECDHE-ECDSA-AES128-SHA", "ECDHE-ECDSA-AES256-SHA384", "ECDHE-ECDSA-AES256-SHA"}
} else if algo == "RSA" {
ciphers = []string{"ECDHE-RSA-AES128-GCM-SHA256", "ECDHE-RSA-AES128-SHA256", "ECDHE-RSA-AES128-SHA", "ECDHE-RSA-AES256-SHA", "AES128-GCM-SHA256", "AES256-GCM-SHA384", "AES128-SHA256", "AES128-SHA", "AES256-SHA"}
}
tlsVer = "tls1_2"
err = checkCiphers(oc, tlsVer, ciphers, server, caFile, cloNS, timeInSec)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Checking old profile with TLS v1.1")
// remove these ciphers as openssl-3.0.7-24.el9 s_client -tls1_1 -cipher <ciphers> failed.
if algo == "ECDSA" {
ciphers = []string{"ECDHE-ECDSA-AES128-SHA", "ECDHE-ECDSA-AES256-SHA"}
} else if algo == "RSA" {
ciphers = []string{"AES128-SHA", "AES256-SHA"}
}
tlsVer = "tls1_1"
err = checkCiphers(oc, tlsVer, ciphers, server, caFile, cloNS, timeInSec)
o.Expect(err).NotTo(o.HaveOccurred())
case "intermediate":
e2e.Logf("Setting alogorith to %s", algo)
e2e.Logf("Checking intermediate profile with TLS v1.3")
tlsVer = "tls1_3"
err := checkTLSVer(oc, tlsVer, server, caFile, cloNS)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Checking intermediate profile ciphers with TLS v1.2")
tlsVer = "tls1_2"
if algo == "ECDSA" {
ciphers = []string{"ECDHE-ECDSA-AES128-GCM-SHA256", "ECDHE-ECDSA-AES256-GCM-SHA384", "ECDHE-ECDSA-CHACHA20-POLY1305"}
} else if algo == "RSA" {
ciphers = []string{"ECDHE-RSA-AES128-GCM-SHA256", "ECDHE-RSA-AES256-GCM-SHA384"}
}
err = checkCiphers(oc, tlsVer, ciphers, server, caFile, cloNS, timeInSec)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Checking intermediate profile with TLS v1.1")
// replace checkCiphers with checkTLSVer as we needn't check all v1.1 Ciphers
tlsVer = "tls1_1"
err = checkTLSVer(oc, tlsVer, server, caFile, cloNS)
o.Expect(err).To(o.HaveOccurred())
case "custom":
e2e.Logf("Setting alogorith to %s", algo)
e2e.Logf("Checking custom profile with TLS v1.3")
tlsVer = "tls1_3"
err := checkTLSVer(oc, tlsVer, server, caFile, cloNS)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Checking custom profile with TLS v1.2")
tlsVer = "tls1_2"
if algo == "ECDSA" {
ciphers = []string{"ECDHE-ECDSA-AES128-GCM-SHA256"}
} else if algo == "RSA" {
ciphers = []string{"ECDHE-RSA-AES128-GCM-SHA256"}
}
err = checkCiphers(oc, tlsVer, ciphers, server, caFile, cloNS, timeInSec)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Checking ciphers not in custom profile with TLS v1.3")
tlsVer = "tls1_3"
if algo == "ECDSA" {
ciphers = []string{"ECDHE-ECDSA-AES128-GCM-SHA256"}
} else if algo == "RSA" {
ciphers = []string{"TLS_AES_128_GCM_SHA256"}
}
err = checkCiphers(oc, tlsVer, ciphers, server, caFile, cloNS, timeInSec)
o.Expect(err).To(o.HaveOccurred())
}
}
return true
}
|
logging
| ||||
function
|
openshift/openshift-tests-private
|
d4a54562-e912-4b6f-b281-b9369998e875
|
checkCollectorConfiguration
|
['"os"', '"path/filepath"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func checkCollectorConfiguration(oc *exutil.CLI, ns, cmName string, searchStrings ...string) (bool, error) {
// Extract vector.toml from the configmap and check that it contains the given strings
dirname := "/tmp/" + oc.Namespace() + "-vectortoml"
defer os.RemoveAll(dirname)
err := os.MkdirAll(dirname, 0777)
if err != nil {
return false, err
}
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("configmap/"+cmName, "-n", ns, "--confirm", "--to="+dirname).Output()
if err != nil {
return false, err
}
filename := filepath.Join(dirname, "vector.toml")
content, err := os.ReadFile(filename)
if err != nil {
return false, err
}
for _, s := range searchStrings {
if !strings.Contains(string(content), s) {
e2e.Logf("can't find %s in vector.toml", s)
return false, nil
}
}
return true, nil
}
|
logging
| ||||
function
|
openshift/openshift-tests-private
|
ef0ffb7d-6bb7-40b3-8da7-c7274e3942bc
|
checkOperatorsRunning
|
['"fmt"', '"io"', '"strings"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func checkOperatorsRunning(oc *exutil.CLI) (bool, error) {
jpath := `{range .items[*]}{.metadata.name}:{.status.conditions[?(@.type=='Available')].status}{':'}{.status.conditions[?(@.type=='Degraded')].status}{'\n'}{end}`
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperators.config.openshift.io", "-o", "jsonpath="+jpath).Output()
if err != nil {
return false, fmt.Errorf("failed to execute 'oc get clusteroperators.config.openshift.io' command: %v", err)
}
lines := strings.Split(strings.TrimSpace(string(output)), "\n")
for _, line := range lines {
e2e.Logf("%s", line)
parts := strings.Split(line, ":")
available := parts[1] == "True"
notDegraded := parts[2] == "False"
if !available || !notDegraded {
return false, nil
}
}
return true, nil
}
|
logging
| ||||
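checkOperatorsRunning relies on a jsonpath template that renders each cluster operator as `name:Available:Degraded` on its own line and treats the cluster as healthy only when every operator is Available=True and Degraded=False. A standalone sketch of the same parsing, fed with illustrative output:

```go
package main

import (
	"fmt"
	"strings"
)

// allOperatorsHealthy mirrors the loop in checkOperatorsRunning.
func allOperatorsHealthy(output string) bool {
	// Each line is "<name>:<Available status>:<Degraded status>", e.g. "authentication:True:False".
	for _, line := range strings.Split(strings.TrimSpace(output), "\n") {
		parts := strings.Split(line, ":")
		available := parts[1] == "True"
		notDegraded := parts[2] == "False"
		if !available || !notDegraded {
			return false
		}
	}
	return true
}

func main() {
	sample := "authentication:True:False\nconsole:True:False\nmonitoring:False:True\n"
	fmt.Println(allOperatorsHealthy(sample)) // false: monitoring is unavailable and degraded
}
```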
function
|
openshift/openshift-tests-private
|
96198c9f-669c-409a-a09c-3914589e64c2
|
waitForOperatorsRunning
|
['"context"', '"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func waitForOperatorsRunning(oc *exutil.CLI) {
e2e.Logf("Wait a minute to allow the cluster to reconcile the config changes.")
time.Sleep(1 * time.Minute)
err := wait.PollUntilContextTimeout(context.Background(), 3*time.Minute, 21*time.Minute, true, func(context.Context) (done bool, err error) {
return checkOperatorsRunning(oc)
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Failed to wait for operators to be running: %v", err))
}
|
logging
| ||||
function
|
openshift/openshift-tests-private
|
300fe5eb-d84c-44a7-8704-867cc76f8e46
|
doHTTPRequest
|
['"crypto/tls"', '"fmt"', '"io"', '"net/http"', '"net/url"', '"path"', '"strings"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func doHTTPRequest(header http.Header, address, path, query, method string, quiet bool, attempts int, requestBody io.Reader, expectedStatusCode int) ([]byte, error) {
us, err := buildURL(address, path, query)
if err != nil {
return nil, err
}
if !quiet {
e2e.Logf("the URL is: %s", us)
}
req, err := http.NewRequest(strings.ToUpper(method), us, requestBody)
if err != nil {
return nil, err
}
req.Header = header
var tr *http.Transport
proxy := getProxyFromEnv()
if len(proxy) > 0 {
proxyURL, err := url.Parse(proxy)
o.Expect(err).NotTo(o.HaveOccurred())
tr = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
Proxy: http.ProxyURL(proxyURL),
}
} else {
tr = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
}
client := &http.Client{Transport: tr}
var resp *http.Response
success := false
for attempts > 0 {
attempts--
resp, err = client.Do(req)
if err != nil {
e2e.Logf("error sending request %v", err)
continue
}
if resp.StatusCode != expectedStatusCode {
buf, _ := io.ReadAll(resp.Body) // nolint
e2e.Logf("Error response from server: %s %s (%v), attempts remaining: %d", resp.Status, string(buf), err, attempts)
if err := resp.Body.Close(); err != nil {
e2e.Logf("error closing body: %v", err)
}
// sleep 5 seconds before the next request
time.Sleep(5 * time.Second)
continue
}
success = true
break
}
if !success {
return nil, fmt.Errorf("run out of attempts while querying the server")
}
defer func() {
if err := resp.Body.Close(); err != nil {
e2e.Logf("error closing body: %v", err)
}
}()
return io.ReadAll(resp.Body)
}
|
logging
| ||||
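doHTTPRequest builds its transport differently depending on whether a proxy is configured in the environment, and skips TLS verification either way. A minimal sketch of that transport selection, assuming a plain os.Getenv("HTTPS_PROXY") lookup in place of the suite's getProxyFromEnv helper:

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"net/url"
	"os"
)

// newTransport mirrors the proxy handling in doHTTPRequest: use the proxy if
// one is configured, and skip server certificate verification either way.
func newTransport() (*http.Transport, error) {
	tr := &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}
	if proxy := os.Getenv("HTTPS_PROXY"); proxy != "" { // stand-in for getProxyFromEnv()
		proxyURL, err := url.Parse(proxy)
		if err != nil {
			return nil, err
		}
		tr.Proxy = http.ProxyURL(proxyURL)
	}
	return tr, nil
}

func main() {
	tr, err := newTransport()
	if err != nil {
		panic(err)
	}
	client := &http.Client{Transport: tr}
	fmt.Printf("client ready: %T\n", client.Transport)
}
```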
function
|
openshift/openshift-tests-private
|
165ec376-1882-4dff-b828-6c31615097a8
|
buildURL
|
['"net/url"', '"path"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func buildURL(u, p, q string) (string, error) {
url, err := url.Parse(u)
if err != nil {
return "", err
}
url.Path = path.Join(url.Path, p)
url.RawQuery = q
return url.String(), nil
}
|
logging
| ||||
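buildURL joins a base address, an extra path segment, and a raw query string. A quick standalone demonstration of the same logic; the host name and query are illustrative only:

```go
package main

import (
	"fmt"
	"net/url"
	"path"
)

// buildURL follows the helper above: join the path and attach the raw query.
func buildURL(u, p, q string) (string, error) {
	parsed, err := url.Parse(u)
	if err != nil {
		return "", err
	}
	parsed.Path = path.Join(parsed.Path, p)
	parsed.RawQuery = q
	return parsed.String(), nil
}

func main() {
	s, err := buildURL("https://gateway.example.com/api", "loki/api/v1/query_range", "limit=5")
	if err != nil {
		panic(err)
	}
	fmt.Println(s) // https://gateway.example.com/api/loki/api/v1/query_range?limit=5
}
```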
function
|
openshift/openshift-tests-private
|
3daa8f04-f6c4-4bdf-a1cc-4f56b05fbee3
|
GetIPVersionStackType
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func GetIPVersionStackType(oc *exutil.CLI) (ipvStackType string) {
svcNetwork, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("network.operator", "cluster", "-o=jsonpath={.spec.serviceNetwork}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Count(svcNetwork, ":") >= 2 && strings.Count(svcNetwork, ".") >= 2 {
ipvStackType = "dualstack"
} else if strings.Count(svcNetwork, ":") >= 2 {
ipvStackType = "ipv6single"
} else if strings.Count(svcNetwork, ".") >= 2 {
ipvStackType = "ipv4single"
}
e2e.Logf("The test cluster IP-version Stack type is :\"%s\".", ipvStackType)
return ipvStackType
}
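// Illustrative sketch: tests typically branch on the returned value to skip
// unsupported network stacks; the skip message below is a hypothetical example.
func exampleSkipOnIPv6SingleStack(oc *exutil.CLI) {
if GetIPVersionStackType(oc) == "ipv6single" {
g.Skip("Skip this case on IPv6 single-stack clusters")
}
}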
|
logging
| ||||
function
|
openshift/openshift-tests-private
|
e04c841e-46d0-40c0-b44f-60ae57fe2b8a
|
convertInterfaceToArray
|
['"fmt"', '"reflect"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func convertInterfaceToArray(t interface{}) []string {
var data []string
switch reflect.TypeOf(t).Kind() {
case reflect.Slice, reflect.Array:
s := reflect.ValueOf(t)
for i := 0; i < s.Len(); i++ {
data = append(data, fmt.Sprint(s.Index(i)))
}
}
return data
}
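// Illustrative sketch: converting a decoded JSON array ([]interface{}) into
// []string; each element is rendered with fmt.Sprint, so mixed types are fine.
func exampleConvertInterface() []string {
raw := []interface{}{"application", "infrastructure", 3}
// expected result: []string{"application", "infrastructure", "3"}
return convertInterfaceToArray(raw)
}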
|
logging
| ||||
function
|
openshift/openshift-tests-private
|
59ff6609-05b0-44d6-81b3-16ff7211d2e9
|
postDataToHttpserver
|
['"context"', '"encoding/json"', '"io"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func postDataToHttpserver(oc *exutil.CLI, clfNS string, httpURL string, postJsonString string) bool {
collectorPods, err := oc.AdminKubeClient().CoreV1().Pods(clfNS).List(context.Background(), metav1.ListOptions{LabelSelector: "app.kubernetes.io/component=collector"})
if err != nil || len(collectorPods.Items) < 1 {
e2e.Logf("failed to get pods by label app.kubernetes.io/component=collector")
return false
}
//ToDo, send logs to httpserver using service ca, oc get cm/openshift-service-ca.crt -o json |jq '.data."service-ca.crt"'
cmd := `curl -s -k -w "%{http_code}" ` + httpURL + " -d '" + postJsonString + "'"
result, err := e2eoutput.RunHostCmdWithRetries(clfNS, collectorPods.Items[0].Name, cmd, 3*time.Second, 30*time.Second)
if err != nil {
e2e.Logf("Show more status as data can not be sent to httpserver")
oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", clfNS, "endpoints").Output()
oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", clfNS, "pods").Output()
return false
}
if result == "200" {
return true
} else {
e2e.Logf("Show result as return code is not 200")
e2e.Logf("result=%v", result)
return false
}
}
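// Illustrative sketch (assumed service URL and payload): post a minimal JSON
// record to an in-cluster HTTP receiver via one of the collector pods.
func examplePostToHTTPServer(oc *exutil.CLI, ns string) bool {
payload := `{"message": "test log from logging QE", "level": "info"}`
return postDataToHttpserver(oc, ns, "http://http-server."+ns+".svc:8080", payload)
}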
|
logging
| ||||
function
|
openshift/openshift-tests-private
|
cb1bd31c-f0fb-4afb-9abc-9dcf8c224537
|
rapidastScan
|
['"context"', '"fmt"', '"os"', '"os/exec"', '"path/filepath"', '"regexp"', '"strings"', '"time"', '"cloud.google.com/go/logging"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['resource']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func rapidastScan(oc *exutil.CLI, ns, configFile string, scanPolicyFile string, apiGroupName string) (bool, error) {
//update the token and create a new config file
content, err := os.ReadFile(configFile)
jobName := "rapidast-" + getRandomString()
if err != nil {
e2e.Logf("rapidastScan abort! Open file %s failed", configFile)
e2e.Logf("rapidast result: riskHigh=unknown riskMedium=unknown")
return false, err
}
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-user", "cluster-admin", fmt.Sprintf("system:serviceaccount:%s:default", ns)).Execute()
oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-user", "cluster-admin", fmt.Sprintf("system:serviceaccount:%s:default", ns)).Execute()
token := getSAToken(oc, "default", ns)
originConfig := string(content)
targetConfig := strings.Replace(originConfig, "Bearer sha256~xxxxxxxx", "Bearer "+token, -1)
newConfigFile := "/tmp/logdast" + getRandomString()
f, err := os.Create(newConfigFile)
if err != nil {
e2e.Logf("rapidastScan abort! prepare configfile %s failed", newConfigFile)
e2e.Logf("rapidast result: riskHigh=unknown riskMedium=unknown")
return false, err
}
defer f.Close()
defer exec.Command("rm", newConfigFile).Output()
f.WriteString(targetConfig)
//Create configmap
err = oc.WithoutNamespace().Run("create").Args("-n", ns, "configmap", jobName, "--from-file=rapidastconfig.yaml="+newConfigFile, "--from-file=customscan.policy="+scanPolicyFile).Execute()
if err != nil {
e2e.Logf("rapidastScan abort! create configmap rapidast-configmap failed")
e2e.Logf("rapidast result: riskHigh=unknown riskMedium=unknown")
return false, err
}
//Create job
loggingBaseDir := exutil.FixturePath("testdata", "logging")
jobTemplate := filepath.Join(loggingBaseDir, "rapidast/job_rapidast.yaml")
rapidastJob := resource{"job", jobName, ns}
err = rapidastJob.applyFromTemplate(oc, "-f", jobTemplate, "-n", ns, "-p", "NAME="+jobName)
if err != nil {
e2e.Logf("rapidastScan abort! create rapidast job failed")
e2e.Logf("rapidast result: riskHigh=unknown riskMedium=unknown")
return false, err
}
//Wait up to 3 minutes until the pod is Failed or Succeeded
wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 3*time.Minute, true, func(context.Context) (done bool, err error) {
jobStatus, err1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ns, "pod", "-l", "job-name="+jobName, "-ojsonpath={.items[0].status.phase}").Output()
e2e.Logf(" rapidast Job status %s ", jobStatus)
if err1 != nil {
return false, nil
}
if jobStatus == "Pending" || jobStatus == "Running" {
return false, nil
}
if jobStatus == "Failed" {
e2e.Logf("rapidast-job %s failed", jobName)
return true, nil
}
if jobStatus == "Succeeded" {
return true, nil
}
return false, nil
})
// Get the rapidast pod name
jobPods, err := oc.AdminKubeClient().CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{LabelSelector: "job-name=" + jobName})
if err != nil {
e2e.Logf("rapidastScan abort! can not find rapidast scan job ")
e2e.Logf("rapidast result: riskHigh=unknown riskMedium=unknown")
return false, err
}
podLogs, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", ns, jobPods.Items[0].Name).Output()
if err != nil {
e2e.Logf("rapidastScan abort! can not fetch logs from rapidast-scan pod %s", jobPods.Items[0].Name)
e2e.Logf("rapidast result: riskHigh=unknown riskMedium=unknown")
return false, err
}
// Copy DAST Report into $ARTIFACT_DIR
artifactAvailable := true
artifactdirPath := os.Getenv("ARTIFACT_DIR")
if artifactdirPath == "" {
artifactAvailable = false
}
info, err := os.Stat(artifactdirPath)
if err != nil {
e2e.Logf("%s doesn't exist", artifactdirPath)
artifactAvailable = false
} else if !info.IsDir() {
e2e.Logf("%s isn't a directory", artifactdirPath)
artifactAvailable = false
}
if artifactAvailable {
rapidastResultsSubDir := artifactdirPath + "/rapiddastresultslogging"
err = os.MkdirAll(rapidastResultsSubDir, 0755)
if err != nil {
e2e.Logf("failed to create %s", rapidastResultsSubDir)
}
artifactFile := rapidastResultsSubDir + "/" + apiGroupName + "_rapidast.result.txt"
e2e.Logf("Write report into %s", artifactFile)
f1, err := os.Create(artifactFile)
if err != nil {
e2e.Logf("failed to create artifactFile %s", artifactFile)
}
defer f1.Close()
_, err = f1.WriteString(podLogs)
if err != nil {
e2e.Logf("failed to write logs into artifactFile %s", artifactFile)
}
} else {
// print pod logs if artifactdirPath is not writable
e2e.Logf("#oc logs -n %s %s \n %s", jobPods.Items[0].Name, ns, podLogs)
}
//return false, if high risk is reported
podLogA := strings.Split(podLogs, "\n")
riskHigh := 0
riskMedium := 0
re1 := regexp.MustCompile(`"riskdesc": .*High`)
re2 := regexp.MustCompile(`"riskdesc": .*Medium`)
for _, item := range podLogA {
if re1.MatchString(item) {
riskHigh++
}
if re2.MatchString(item) {
riskMedium++
}
}
e2e.Logf("rapidast result: riskHigh=%v riskMedium=%v", riskHigh, riskMedium)
if riskHigh > 0 {
return false, fmt.Errorf("high risk alert, please check the scan result report")
}
return true, nil
}
|
logging
| |||
function
|
openshift/openshift-tests-private
|
ac9ec7cb-876c-49ff-a6c4-9c5bc6dcd0b4
|
getOIDC
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func getOIDC(oc *exutil.CLI) (string, error) {
oidc, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("authentication.config", "cluster", "-o=jsonpath={.spec.serviceAccountIssuer}").Output()
if err != nil {
return "", err
}
return strings.TrimPrefix(oidc, "https://"), nil
}
|
logging
| ||||
function
|
openshift/openshift-tests-private
|
d4bfeddc-4ead-4915-a92c-7af416a95684
|
getPoolID
|
['"encoding/json"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func getPoolID(oc *exutil.CLI) (string, error) {
// pool_id="$(oc get authentication cluster -o json | jq -r .spec.serviceAccountIssuer | sed 's/.*\/\([^\/]*\)-oidc/\1/')"
issuer, err := getOIDC(oc)
if err != nil {
return "", err
}
return strings.Split(strings.Split(issuer, "/")[1], "-oidc")[0], nil
}
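// Illustrative sketch: with a hypothetical issuer such as
// "https://oidc.example.com/mycluster-oidc", getPoolID returns "mycluster".
func examplePoolID(oc *exutil.CLI) {
poolID, err := getPoolID(oc)
if err != nil {
e2e.Logf("failed to get pool id: %v", err)
return
}
e2e.Logf("identity pool id: %s", poolID)
}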
|
logging
| ||||
function
|
openshift/openshift-tests-private
|
611a8bdb-4176-46ee-b080-123853511ab5
|
genLinuxAuditLogsOnWorker
|
['"fmt"', '"strings"', '"cloud.google.com/go/logging"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func genLinuxAuditLogsOnWorker(oc *exutil.CLI) (string, error) {
workerNodes, err := exutil.GetSchedulableLinuxWorkerNodes(oc)
if err != nil || len(workerNodes) == 0 {
return "", fmt.Errorf("can not find schedulable worker to enable audit policy")
}
result, err := exutil.DebugNodeWithChroot(oc, workerNodes[0].Name, "bash", "-c", "auditctl -w /var/log/pods/ -p rwa -k logging-qe-test-read-write-pod-logs")
if err != nil && strings.Contains(result, "Rule exists") {
//Note: we still return the nodeName here so that the policy can be cleaned up when `defer deleteLinuxAuditPolicyFromNode` is called.
return workerNodes[0].Name, nil
}
return workerNodes[0].Name, err
}
|
logging
| ||||
function
|
openshift/openshift-tests-private
|
9a416706-4724-462e-8bf0-21d0cf35483e
|
deleteLinuxAuditPolicyFromNode
|
['"fmt"', '"cloud.google.com/go/logging"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func deleteLinuxAuditPolicyFromNode(oc *exutil.CLI, nodeName string) error {
if nodeName == "" {
return fmt.Errorf("nodeName can not be empty")
}
_, err := exutil.DebugNodeWithChroot(oc, nodeName, "bash", "-c", "auditctl -W /var/log/pods/ -p rwa -k logging-qe-test-read-write-pod-logs")
return err
}
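// Illustrative sketch of the enable/cleanup pairing used by the audit-log
// cases in this section: enable the auditd watch on one worker, then remove
// it when the test exits.
func exampleAuditRuleLifecycle(oc *exutil.CLI) {
nodeName, err := genLinuxAuditLogsOnWorker(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer deleteLinuxAuditPolicyFromNode(oc, nodeName)
// ... verify that linux-audit records reach the configured log store ...
}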
|
logging
| ||||
function
|
openshift/openshift-tests-private
|
01648762-dbce-4ec4-846a-dcd23f72ce07
|
hasMaster
|
['"context"', '"io"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/utils.go
|
func hasMaster(oc *exutil.CLI) bool {
masterNodes, err := oc.AdminKubeClient().CoreV1().Nodes().List(context.Background(), metav1.ListOptions{LabelSelector: "node-role.kubernetes.io/master="})
if err != nil {
e2e.Logf("hit error when listing master nodes: %v", err)
}
return len(masterNodes.Items) > 0
}
|
logging
| ||||
test
|
openshift/openshift-tests-private
|
5125d97f-b34e-4a14-99cc-2efe48997543
|
vector_azure
|
import (
"fmt"
"path/filepath"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_azure.go
|
// Package logging is used to test openshift-logging features
package logging
import (
"fmt"
"path/filepath"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)
var _ = g.Describe("[sig-openshift-logging] LOGGING Logging", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("log-to-azure", exutil.KubeConfigPath())
loggingBaseDir string
CLO SubscriptionObjects
)
g.BeforeEach(func() {
loggingBaseDir = exutil.FixturePath("testdata", "logging")
subTemplate := filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml")
CLO = SubscriptionObjects{
OperatorName: "cluster-logging-operator",
Namespace: cloNS,
PackageName: "cluster-logging",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
g.By("deploy CLO")
CLO.SubscribeOperator(oc)
oc.SetupProject()
})
//author [email protected]
g.It("CPaasrunOnly-ConnectedOnly-Author:anli-High-71770-Forward logs to Azure Log Analytics -- Minimal Options", func() {
if exutil.IsWorkloadIdentityCluster(oc) {
g.Skip("Skip on the workload identity enabled cluster!")
}
cloudName := getAzureCloudName(oc)
if cloudName != "azurepubliccloud" {
g.Skip("Skip as the cluster is not on Azure Public!")
}
g.By("Create log producer")
clfNS := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", clfNS, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Prepre Azure Log Storage Env")
resourceGroupName, err := exutil.GetAzureCredentialFromCluster(oc)
o.Expect(err).NotTo(o.HaveOccurred())
workSpaceName := getInfrastructureName(oc) + "case71770"
azLog, err := newAzureLog(oc, "", resourceGroupName, workSpaceName, "case71770")
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploy CLF to send logs to Log Analytics")
azureSecret := resource{"secret", "azure-secret-71770", clfNS}
defer azureSecret.clear(oc)
err = azLog.createSecret(oc, azureSecret.name, azureSecret.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
clf := clusterlogforwarder{
name: "clf-71770",
namespace: clfNS,
secretName: azureSecret.name,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "azureMonitor-min-opts.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
defer azLog.deleteWorkspace()
clf.create(oc, "PREFIX_OR_NAME="+azLog.tPrefixOrName, "CUSTOMER_ID="+azLog.customerID)
g.By("Verify the test result")
for _, tableName := range []string{azLog.tPrefixOrName + "infra_log_CL", azLog.tPrefixOrName + "audit_log_CL", azLog.tPrefixOrName + "app_log_CL"} {
_, err := azLog.getLogByTable(tableName)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("logs are not found in %s in AzureLogWorkspace", tableName))
}
})
})
|
package logging
| ||||
test case
|
openshift/openshift-tests-private
|
289c5158-0443-4b62-b07e-d4e8aa22292c
|
CPaasrunOnly-ConnectedOnly-Author:anli-High-71770-Forward logs to Azure Log Analytics -- Minimal Options
|
['"fmt"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_azure.go
|
g.It("CPaasrunOnly-ConnectedOnly-Author:anli-High-71770-Forward logs to Azure Log Analytics -- Minimal Options", func() {
if exutil.IsWorkloadIdentityCluster(oc) {
g.Skip("Skip on the workload identity enabled cluster!")
}
cloudName := getAzureCloudName(oc)
if cloudName != "azurepubliccloud" {
g.Skip("Skip as the cluster is not on Azure Public!")
}
g.By("Create log producer")
clfNS := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", clfNS, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Prepre Azure Log Storage Env")
resourceGroupName, err := exutil.GetAzureCredentialFromCluster(oc)
o.Expect(err).NotTo(o.HaveOccurred())
workSpaceName := getInfrastructureName(oc) + "case71770"
azLog, err := newAzureLog(oc, "", resourceGroupName, workSpaceName, "case71770")
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploy CLF to send logs to Log Analytics")
azureSecret := resource{"secret", "azure-secret-71770", clfNS}
defer azureSecret.clear(oc)
err = azLog.createSecret(oc, azureSecret.name, azureSecret.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
clf := clusterlogforwarder{
name: "clf-71770",
namespace: clfNS,
secretName: azureSecret.name,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "azureMonitor-min-opts.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
defer azLog.deleteWorkspace()
clf.create(oc, "PREFIX_OR_NAME="+azLog.tPrefixOrName, "CUSTOMER_ID="+azLog.customerID)
g.By("Verify the test result")
for _, tableName := range []string{azLog.tPrefixOrName + "infra_log_CL", azLog.tPrefixOrName + "audit_log_CL", azLog.tPrefixOrName + "app_log_CL"} {
_, err := azLog.getLogByTable(tableName)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("logs are not found in %s in AzureLogWorkspace", tableName))
}
})
| |||||
test
|
openshift/openshift-tests-private
|
2ba5584d-52bb-42b5-aa0d-c48520a29b44
|
vector_cloudwatch
|
import (
"context"
"fmt"
"path/filepath"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_cloudwatch.go
|
package logging
import (
"context"
"fmt"
"path/filepath"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-openshift-logging] Logging NonPreRelease", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("vector-cw", exutil.KubeConfigPath())
loggingBaseDir string
infraName string
)
g.Context("Log Forward to Cloudwatch using Vector as Collector", func() {
g.BeforeEach(func() {
platform := exutil.CheckPlatform(oc)
if platform != "aws" {
g.Skip("Skip for non-supported platform, the supported platform is AWS!!!")
}
loggingBaseDir = exutil.FixturePath("testdata", "logging")
CLO := SubscriptionObjects{
OperatorName: "cluster-logging-operator",
Namespace: cloNS,
PackageName: "cluster-logging",
Subscription: filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml"),
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
g.By("deploy CLO")
CLO.SubscribeOperator(oc)
oc.SetupProject()
infraName = getInfrastructureName(oc)
})
g.It("Author:qitang-CPaasrunOnly-Medium-76074-Forward logs to Cloudwatch group by namespaceName and groupPrefix", func() {
g.By("init Cloudwatch test spec")
clfNS := oc.Namespace()
cw := cloudwatchSpec{
collectorSAName: "cloudwatch-" + getRandomString(),
secretNamespace: clfNS,
secretName: "logging-76074-" + getRandomString(),
groupName: "logging-76074-" + infraName + `.{.kubernetes.namespace_name||.log_type||"none-typed-logs"}`,
logTypes: []string{"infrastructure", "application", "audit"},
}
defer cw.deleteResources(oc)
cw.init(oc)
g.By("Create log producer")
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
cw.selAppNamespaces = append(cw.selAppNamespaces, appProj)
if !cw.hasMaster {
nodeName, err := genLinuxAuditLogsOnWorker(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer deleteLinuxAuditPolicyFromNode(oc, nodeName)
}
g.By("Create clusterlogforwarder")
var template string
if cw.stsEnabled {
template = filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "cloudwatch-iamRole.yaml")
} else {
template = filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "cloudwatch-accessKey.yaml")
}
clf := clusterlogforwarder{
name: "clf-76074",
namespace: clfNS,
templateFile: template,
secretName: cw.secretName,
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: cw.collectorSAName,
}
defer clf.delete(oc)
clf.createServiceAccount(oc)
cw.createClfSecret(oc)
clf.create(oc, "REGION="+cw.awsRegion, "GROUP_NAME="+cw.groupName)
nodes, err := clf.getCollectorNodeNames(oc)
o.Expect(err).NotTo(o.HaveOccurred())
cw.nodes = append(cw.nodes, nodes...)
g.By("Check logs in Cloudwatch")
o.Expect(cw.logsFound()).To(o.BeTrue())
})
// author [email protected]
g.It("Author:qitang-CPaasrunOnly-High-76075-Forward logs to Cloudwatch using namespaceUUID and groupPrefix", func() {
g.By("init Cloudwatch test spec")
clfNS := oc.Namespace()
cw := cloudwatchSpec{
collectorSAName: "cloudwatch-" + getRandomString(),
secretNamespace: clfNS,
secretName: "logging-76075-" + getRandomString(),
groupName: "logging-76075-" + infraName + `.{.kubernetes.namespace_id||.log_type||"none-typed-logs"}`,
logTypes: []string{"infrastructure", "application", "audit"},
}
defer cw.deleteResources(oc)
cw.init(oc)
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
g.By("Create log producer")
oc.SetupProject()
appProj := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
uuid, err := oc.WithoutNamespace().Run("get").Args("project", appProj, "-ojsonpath={.metadata.uid}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
cw.selNamespacesID = []string{uuid}
if !cw.hasMaster {
nodeName, err := genLinuxAuditLogsOnWorker(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer deleteLinuxAuditPolicyFromNode(oc, nodeName)
}
g.By("Create clusterlogforwarder")
var template string
if cw.stsEnabled {
template = filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "cloudwatch-iamRole.yaml")
} else {
template = filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "cloudwatch-accessKey.yaml")
}
clf := clusterlogforwarder{
name: "clf-76075",
namespace: clfNS,
templateFile: template,
secretName: cw.secretName,
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: cw.collectorSAName,
}
defer clf.delete(oc)
clf.createServiceAccount(oc)
cw.createClfSecret(oc)
clf.create(oc, "REGION="+cw.awsRegion, "GROUP_NAME="+cw.groupName)
g.By("Check logs in Cloudwatch")
o.Expect(cw.checkLogGroupByNamespaceID()).To(o.BeTrue())
o.Expect(cw.infrastructureLogsFound(false)).To(o.BeTrue())
o.Expect(cw.auditLogsFound(false)).To(o.BeTrue())
})
g.It("CPaasrunOnly-Author:ikanse-High-61600-Collector External Cloudwatch output complies with the tlsSecurityProfile configuration.[Slow][Disruptive]", func() {
g.By("init Cloudwatch test spec")
clfNS := oc.Namespace()
cw := cloudwatchSpec{
collectorSAName: "cloudwatch-" + getRandomString(),
secretNamespace: clfNS,
secretName: "logging-61600-" + getRandomString(),
groupName: "logging-61600-" + infraName + `.{.log_type||"none-typed-logs"}`,
logTypes: []string{"infrastructure", "application", "audit"},
}
defer cw.deleteResources(oc)
cw.init(oc)
g.By("Configure the global tlsSecurityProfile to use custom profile")
ogTLS, er := oc.AsAdmin().WithoutNamespace().Run("get").Args("apiserver/cluster", "-o", "jsonpath={.spec.tlsSecurityProfile}").Output()
o.Expect(er).NotTo(o.HaveOccurred())
if ogTLS == "" {
ogTLS = "null"
}
ogPatch := fmt.Sprintf(`[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": %s}]`, ogTLS)
defer func() {
oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", ogPatch).Execute()
waitForOperatorsRunning(oc)
}()
patch := `[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": {"custom":{"ciphers":["ECDHE-ECDSA-CHACHA20-POLY1305","ECDHE-RSA-CHACHA20-POLY1305","ECDHE-RSA-AES128-GCM-SHA256","ECDHE-ECDSA-AES128-GCM-SHA256"],"minTLSVersion":"VersionTLS12"},"type":"Custom"}}]`
er = oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", patch).Execute()
o.Expect(er).NotTo(o.HaveOccurred())
g.By("Make sure that all the Cluster Operators are in healthy state before progressing.")
waitForOperatorsRunning(oc)
g.By("create clusterlogforwarder")
var template string
if cw.stsEnabled {
template = filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "cloudwatch-iamRole.yaml")
} else {
template = filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "cloudwatch-accessKey.yaml")
}
clf := clusterlogforwarder{
name: "clf-61600",
namespace: clfNS,
templateFile: template,
secretName: cw.secretName,
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: cw.collectorSAName,
}
defer clf.delete(oc)
clf.createServiceAccount(oc)
cw.createClfSecret(oc)
clf.create(oc, "REGION="+cw.awsRegion, "GROUP_NAME="+cw.groupName)
nodes, err := clf.getCollectorNodeNames(oc)
o.Expect(err).NotTo(o.HaveOccurred())
cw.nodes = append(cw.nodes, nodes...)
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
g.By("Create log producer")
appProj1 := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj1, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
cw.selAppNamespaces = []string{appProj1}
if !cw.hasMaster {
nodeName, err := genLinuxAuditLogsOnWorker(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer deleteLinuxAuditPolicyFromNode(oc, nodeName)
}
g.By("The Cloudwatch sink in Vector config must use the Custom tlsSecurityProfile")
searchString := `[sinks.output_cloudwatch.tls]
min_tls_version = "VersionTLS12"
ciphersuites = "ECDHE-ECDSA-CHACHA20-POLY1305,ECDHE-RSA-CHACHA20-POLY1305,ECDHE-RSA-AES128-GCM-SHA256,ECDHE-ECDSA-AES128-GCM-SHA256"`
result, err := checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", searchString)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue(), "the configuration %s is not in vector.toml", searchString)
g.By("check logs in Cloudwatch")
logGroupName := "logging-61600-" + infraName + ".application"
o.Expect(cw.logsFound()).To(o.BeTrue())
filteredLogs, err := cw.getLogRecordsByNamespace(30, logGroupName, appProj1)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(filteredLogs) > 0).Should(o.BeTrue(), "Couldn't filter logs by namespace")
g.By("Set Intermediate tlsSecurityProfile for the Cloudwatch output.")
patch = `[{"op": "add", "path": "/spec/outputs/0/tls", "value": {"securityProfile": {"type": "Intermediate"}}}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
g.By("Create log producer")
oc.SetupProject()
appProj2 := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj2, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
cw.selAppNamespaces = []string{appProj2}
g.By("The Cloudwatch sink in Vector config must use the Intermediate tlsSecurityProfile")
searchString = `[sinks.output_cloudwatch.tls]
min_tls_version = "VersionTLS12"
ciphersuites = "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,ECDHE-ECDSA-AES128-GCM-SHA256,ECDHE-RSA-AES128-GCM-SHA256,ECDHE-ECDSA-AES256-GCM-SHA384,ECDHE-RSA-AES256-GCM-SHA384,ECDHE-ECDSA-CHACHA20-POLY1305,ECDHE-RSA-CHACHA20-POLY1305,DHE-RSA-AES128-GCM-SHA256,DHE-RSA-AES256-GCM-SHA384"`
result, err = checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", searchString)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue(), "the configuration %s is not in vector.toml", searchString)
g.By("Check for errors in collector pod logs")
e2e.Logf("Wait for a minute before the collector logs are generated.")
time.Sleep(60 * time.Second)
collectorLogs, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", clf.namespace, "--selector=app.kubernetes.io/component=collector").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(collectorLogs, "Error trying to connect")).ShouldNot(o.BeTrue(), "Unable to connect to the external Cloudwatch server.")
g.By("check logs in Cloudwatch")
o.Expect(cw.logsFound()).To(o.BeTrue())
filteredLogs, err = cw.getLogRecordsByNamespace(30, logGroupName, appProj2)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(filteredLogs) > 0).Should(o.BeTrue(), "Couldn't filter logs by namespace")
})
// author [email protected]
g.It("CPaasrunOnly-Author:qitang-Medium-71778-Collect or exclude logs by matching pod labels and namespaces.[Slow]", func() {
g.By("init Cloudwatch test spec")
clfNS := oc.Namespace()
cw := cloudwatchSpec{
collectorSAName: "cloudwatch-" + getRandomString(),
secretNamespace: clfNS,
secretName: "logging-71778-" + getRandomString(),
groupName: "logging-71778-" + infraName + `.{.log_type||"none-typed-logs"}`,
logTypes: []string{"application"},
}
defer cw.deleteResources(oc)
cw.init(oc)
exutil.By("Create projects for app logs and deploy the log generators")
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
oc.SetupProject()
appNS1 := oc.Namespace()
err := oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", appNS1, "-p", "LABELS={\"test\": \"logging-71778\", \"test.logging.io/logging.qe-test-label\": \"logging-71778-test\"}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
var namespaces []string
for i := 0; i < 3; i++ {
ns := "logging-project-71778-" + strconv.Itoa(i) + "-" + getRandomString()
defer oc.DeleteSpecifiedNamespaceAsAdmin(ns)
oc.CreateSpecifiedNamespaceAsAdmin(ns)
namespaces = append(namespaces, ns)
}
err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", namespaces[0], "-p", "LABELS={\"test.logging-71778\": \"logging-71778\", \"test.logging.io/logging.qe-test-label\": \"logging-71778-test\"}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", namespaces[1], "-p", "LABELS={\"test.logging.io/logging.qe-test-label\": \"logging-71778-test\"}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", namespaces[2], "-p", "LABELS={\"test\": \"logging-71778\"}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create clusterlogforwarder")
var template string
if cw.stsEnabled {
template = filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "cloudwatch-iamRole.yaml")
} else {
template = filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "cloudwatch-accessKey.yaml")
}
clf := clusterlogforwarder{
name: "clf-71778",
namespace: clfNS,
templateFile: template,
secretName: cw.secretName,
collectApplicationLogs: true,
serviceAccountName: cw.collectorSAName,
}
defer clf.delete(oc)
clf.createServiceAccount(oc)
cw.createClfSecret(oc)
clf.create(oc, "REGION="+cw.awsRegion, "GROUP_NAME="+cw.groupName, "INPUT_REFS=[\"application\"]")
patch := `[{"op": "add", "path": "/spec/inputs", "value": [{"name": "myapplogdata", "type": "application", "application": {"selector": {"matchLabels": {"test.logging.io/logging.qe-test-label": "logging-71778-test"}}}}]}, {"op": "replace", "path": "/spec/pipelines/0/inputRefs", "value": ["myapplogdata"]}]`
clf.update(oc, "", patch, "--type=json")
clf.waitForCollectorPodsReady(oc)
exutil.By("Check logs in Cloudwatch")
cw.selAppNamespaces = []string{namespaces[0], namespaces[1], appNS1}
cw.disAppNamespaces = []string{namespaces[2]}
o.Expect(cw.logsFound()).To(o.BeTrue())
exutil.By("Update CLF to combine label selector and namespace selector")
patch = `[{"op": "add", "path": "/spec/inputs/0/application/includes", "value": [{"namespace": "*71778*"}]}, {"op": "add", "path": "/spec/inputs/0/application/excludes", "value": [{"namespace": "` + namespaces[1] + `"}]}]`
clf.update(oc, "", patch, "--type=json")
clf.waitForCollectorPodsReady(oc)
//sleep 10 seconds to wait for the caches in collectors to be cleared
time.Sleep(10 * time.Second)
exutil.By("Check logs in Cloudwatch")
newGroupName := "new-logging-71778-" + infraName
clf.update(oc, "", `[{"op": "replace", "path": "/spec/outputs/0/cloudwatch/groupName", "value": "`+newGroupName+`"}]`, "--type=json")
clf.waitForCollectorPodsReady(oc)
defer cw.deleteGroups("logging-71778-" + infraName)
cw.setGroupName(newGroupName)
cw.selAppNamespaces = []string{namespaces[0]}
cw.disAppNamespaces = []string{namespaces[1], namespaces[2], appNS1}
o.Expect(cw.logsFound()).To(o.BeTrue())
})
// author [email protected]
g.It("CPaasrunOnly-Author:qitang-High-71488-Collect container logs from infrastructure projects in an application input.", func() {
g.By("init Cloudwatch test spec")
clfNS := oc.Namespace()
cw := cloudwatchSpec{
collectorSAName: "clf-71488",
secretName: "clf-71488",
secretNamespace: clfNS,
groupName: "logging-71488-" + infraName + `.{.log_type||"none-typed-logs"}`,
logTypes: []string{"infrastructure"},
}
defer cw.deleteResources(oc)
cw.init(oc)
exutil.By("Create clusterlogforwarder")
var template string
if cw.stsEnabled {
template = filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "cloudwatch-iamRole.yaml")
} else {
template = filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "cloudwatch-accessKey.yaml")
}
clf := clusterlogforwarder{
name: "clf-71488",
namespace: clfNS,
templateFile: template,
secretName: cw.secretName,
collectApplicationLogs: true,
serviceAccountName: cw.collectorSAName,
}
defer clf.delete(oc)
clf.createServiceAccount(oc)
cw.createClfSecret(oc)
clf.create(oc, "REGION="+cw.awsRegion, "GROUP_NAME="+cw.groupName, "INPUT_REFS=[\"application\"]")
exutil.By("Update CLF to add infra projects to application logs")
patch := `[{"op": "add", "path": "/spec/inputs", "value": [{"name": "new-app", "type": "application", "application": {"includes": [{"namespace": "openshift*"}]}}]}, {"op": "replace", "path": "/spec/pipelines/0/inputRefs", "value": ["new-app"]}]`
clf.update(oc, "", patch, "--type=json")
exutil.By("CLF should be rejected as the serviceaccount doesn't have sufficient permissions")
checkResource(oc, true, false, `insufficient permissions on service account, not authorized to collect ["infrastructure"] logs`, []string{"clusterlogforwarder.observability.openshift.io", clf.name, "-n", clf.namespace, "-ojsonpath={.status.conditions[*].message}"})
exutil.By("Add cluster-role/collect-infrastructure-logs to the serviceaccount")
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "collect-infrastructure-logs")
err := addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "collect-infrastructure-logs")
o.Expect(err).NotTo(o.HaveOccurred())
//sleep 2 minutes for CLO to update the CLF
time.Sleep(2 * time.Minute)
checkResource(oc, false, false, `insufficient permissions on service account, not authorized to collect ["infrastructure"] logs`, []string{"clusterlogforwarder.observability.openshift.io", clf.name, "-n", clf.namespace, "-ojsonpath={.status.conditions[*].message}"})
clf.waitForCollectorPodsReady(oc)
exutil.By("Check logs in Cloudwatch, should find some logs from openshift* projects")
o.Expect(cw.checkInfraContainerLogs(false)).To(o.BeTrue())
})
// author [email protected]
g.It("Author:qitang-CPaasrunOnly-Medium-75415-Validation for multiple cloudwatch outputs in iamRole mode.[Slow]", func() {
if !exutil.IsSTSCluster(oc) {
g.Skip("Skip for the cluster doesn't have STS.")
}
g.By("init Cloudwatch test spec")
clfNS := oc.Namespace()
cw := cloudwatchSpec{
collectorSAName: "clf-75415",
secretName: "clf-75415",
secretNamespace: clfNS,
groupName: "logging-75415-" + infraName + `.{.log_type||"none-typed-logs"}`,
logTypes: []string{"application"},
}
defer cw.deleteResources(oc)
cw.init(oc)
fakeCW := cloudwatchSpec{
collectorSAName: "clf-75415",
secretName: "clf-75415-fake",
secretNamespace: clfNS,
groupName: "logging-75415-" + infraName + "-logs",
logTypes: []string{"application"},
}
defer fakeCW.deleteResources(oc)
fakeCW.init(oc)
staticCW := cloudwatchSpec{
collectorSAName: "clf-75415",
secretName: "static-cred",
secretNamespace: clfNS,
groupName: "logging-75415-" + infraName + `-static-cred-logs`,
logTypes: []string{"application"},
}
defer staticCW.deleteResources(oc)
staticCW.init(oc)
exutil.By("Create clusterlogforwarder")
clf := clusterlogforwarder{
name: "clf-75415",
namespace: clfNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "cloudwatch-iamRole.yaml"),
secretName: cw.secretName,
collectApplicationLogs: true,
serviceAccountName: cw.collectorSAName,
}
defer clf.delete(oc)
clf.createServiceAccount(oc)
cw.createClfSecret(oc)
clf.create(oc, "REGION="+cw.awsRegion, "GROUP_NAME="+cw.groupName, "INPUT_REFS=[\"application\"]")
exutil.By("add one output to use the same credentials as the first output")
patch := `[{"op": "add", "path": "/spec/outputs/-", "value": {"name": "cloudwatch-2", "type": "cloudwatch", "cloudwatch": {"authentication": {"type": "iamRole", "iamRole": {"token": {"from": "serviceAccount"}, roleARN: {"key": "role_arn", "secretName": "` + cw.secretName + `"}}}, "groupName": "` + fakeCW.groupName + `", "region": "` + fakeCW.awsRegion + `"}}},{"op": "add", "path": "/spec/pipelines/0/outputRefs/-", "value": "cloudwatch-2"}]`
clf.update(oc, "", patch, "--type=json")
clf.waitForCollectorPodsReady(oc)
exutil.By("Create log producer")
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("collector pods should send logs to these 2 outputs")
o.Expect(cw.logsFound() && fakeCW.logsFound()).Should(o.BeTrue())
exutil.By("add one output to use static credentials")
secret := resource{"secret", staticCW.secretName, clfNS}
defer secret.clear(oc)
//get credentials
cfg := readDefaultSDKExternalConfigurations(context.TODO(), cw.awsRegion)
cred, _ := cfg.Credentials.Retrieve(context.TODO())
err = oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", secret.name, "--from-literal=aws_access_key_id="+cred.AccessKeyID, "--from-literal=aws_secret_access_key="+cred.SecretAccessKey, "-n", secret.namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
patch = `[{"op": "add", "path": "/spec/outputs/-", "value": {"name": "cloudwatch-3", "type": "cloudwatch", "cloudwatch": {"groupName": "` + staticCW.groupName + `", "region": "` + staticCW.awsRegion + `", "authentication": {"type": "awsAccessKey", "awsAccessKey": {"keyId": {"key": "aws_access_key_id", "secretName": "static-cred"}, "keySecret": {"key": "aws_secret_access_key", "secretName": "static-cred"}}}}}}, {"op": "add", "path": "/spec/pipelines/0/outputRefs/-", "value": "cloudwatch-3"}]`
clf.update(oc, "", patch, "--type=json")
// wait for 10 seconds for collector pods to load new config
time.Sleep(10 * time.Second)
clf.waitForCollectorPodsReady(oc)
cw.deleteGroups("")
fakeCW.deleteGroups("")
staticCW.deleteGroups("")
exutil.By("collector pods should send logs to these 3 outputs")
o.Expect(cw.logsFound() && fakeCW.logsFound() && staticCW.logsFound()).Should(o.BeTrue())
exutil.By("update the second output to use another role_arn")
patch = `[{"op": "replace", "path": "/spec/outputs/1/cloudwatch/authentication/iamRole/roleARN/secretName", "value": "` + fakeCW.secretName + `"}]`
clf.update(oc, "", patch, "--type=json")
checkResource(oc, true, false, "Found multiple different CloudWatch RoleARN authorizations in the outputs spec", []string{"clusterlogforwarder.observability.openshift.io", clf.name, "-n", clf.namespace, "-ojsonpath={.status.outputConditions[*].message}"})
})
// author [email protected]
g.It("Author:qitang-CPaasrunOnly-Medium-75417-Validation for multiple CloudWatch outputs in awsAccessKey mode.", func() {
if exutil.IsSTSCluster(oc) {
g.Skip("Skip for the cluster have STS enabled.")
}
g.By("init Cloudwatch test spec")
clfNS := oc.Namespace()
cw := cloudwatchSpec{
collectorSAName: "clf-75417",
secretName: "clf-75417",
secretNamespace: clfNS,
groupName: "logging-75417-" + infraName + `.{.log_type||"none-typed-logs"}`,
logTypes: []string{"application"},
}
defer cw.deleteResources(oc)
cw.init(oc)
fakeCW := cloudwatchSpec{
collectorSAName: "clf-75417",
secretName: "clf-75417-fake",
secretNamespace: clfNS,
groupName: "logging-75417-" + infraName + "-logs",
logTypes: []string{"application"},
}
defer fakeCW.deleteResources(oc)
fakeCW.init(oc)
exutil.By("Create clusterlogforwarder")
clf := clusterlogforwarder{
name: "clf-75417",
namespace: clfNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "cloudwatch-accessKey.yaml"),
secretName: cw.secretName,
collectApplicationLogs: true,
waitForPodReady: true,
serviceAccountName: cw.collectorSAName,
}
defer clf.delete(oc)
clf.createServiceAccount(oc)
cw.createClfSecret(oc)
clf.create(oc, "REGION="+cw.awsRegion, "GROUP_NAME="+cw.groupName, "INPUT_REFS=[\"application\"]")
exutil.By("add one output to the CLF with same same secret")
patch := `[{"op": "add", "path": "/spec/outputs/-", "value": {"name": "new-cloudwatch-2", "type": "cloudwatch", "cloudwatch": {"authentication": {"type": "awsAccessKey", "awsAccessKey": {"keyId": {"key": "aws_access_key_id", "secretName": "` + cw.secretName + `"}, "keySecret": {"key": "aws_secret_access_key", "secretName": "` + cw.secretName + `"}}}, "groupName": "` + fakeCW.groupName + `", "region": "` + fakeCW.awsRegion + `"}}},{"op": "add", "path": "/spec/pipelines/0/outputRefs/-", "value": "new-cloudwatch-2"}]`
clf.update(oc, "", patch, "--type=json")
clf.waitForCollectorPodsReady(oc)
exutil.By("Create log producer")
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(cw.logsFound() && fakeCW.logsFound()).Should(o.BeTrue())
exutil.By("update one of the output to use another secret")
//since we can't get another AWS key pair, add a secret here with a fake aws_access_key_id and aws_secret_access_key
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", "-n", clf.namespace, fakeCW.secretName, "--from-literal=aws_access_key_id="+getRandomString(), "--from-literal=aws_secret_access_key="+getRandomString()).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
patch = `[{"op": "replace", "path": "/spec/outputs/0/cloudwatch/authentication/awsAccessKey/keyId/secretName", "value": "` + fakeCW.secretName + `"}, {"op": "replace", "path": "/spec/outputs/0/cloudwatch/authentication/awsAccessKey/keySecret/secretName", "value": "` + fakeCW.secretName + `"}]`
clf.update(oc, "", patch, "--type=json")
//sleep 10 seconds for collector pods to load new credentials
time.Sleep(10 * time.Second)
clf.waitForCollectorPodsReady(oc)
cw.deleteGroups("")
fakeCW.deleteGroups("")
//ensure collector pods still can forward logs to cloudwatch with correct credentials
o.Expect(cw.logsFound() || fakeCW.logsFound()).Should(o.BeTrue())
})
})
})
|
package logging
| ||||
test case
|
openshift/openshift-tests-private
|
a199efcb-b363-47ca-a365-68d7e53c447d
|
Author:qitang-CPaasrunOnly-Medium-76074-Forward logs to Cloudwatch group by namespaceName and groupPrefix
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_cloudwatch.go
|
g.It("Author:qitang-CPaasrunOnly-Medium-76074-Forward logs to Cloudwatch group by namespaceName and groupPrefix", func() {
g.By("init Cloudwatch test spec")
clfNS := oc.Namespace()
cw := cloudwatchSpec{
collectorSAName: "cloudwatch-" + getRandomString(),
secretNamespace: clfNS,
secretName: "logging-76074-" + getRandomString(),
groupName: "logging-76074-" + infraName + `.{.kubernetes.namespace_name||.log_type||"none-typed-logs"}`,
logTypes: []string{"infrastructure", "application", "audit"},
}
defer cw.deleteResources(oc)
cw.init(oc)
g.By("Create log producer")
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
cw.selAppNamespaces = append(cw.selAppNamespaces, appProj)
if !cw.hasMaster {
nodeName, err := genLinuxAuditLogsOnWorker(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer deleteLinuxAuditPolicyFromNode(oc, nodeName)
}
g.By("Create clusterlogforwarder")
var template string
if cw.stsEnabled {
template = filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "cloudwatch-iamRole.yaml")
} else {
template = filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "cloudwatch-accessKey.yaml")
}
clf := clusterlogforwarder{
name: "clf-76074",
namespace: clfNS,
templateFile: template,
secretName: cw.secretName,
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: cw.collectorSAName,
}
defer clf.delete(oc)
clf.createServiceAccount(oc)
cw.createClfSecret(oc)
clf.create(oc, "REGION="+cw.awsRegion, "GROUP_NAME="+cw.groupName)
nodes, err := clf.getCollectorNodeNames(oc)
o.Expect(err).NotTo(o.HaveOccurred())
cw.nodes = append(cw.nodes, nodes...)
g.By("Check logs in Cloudwatch")
o.Expect(cw.logsFound()).To(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
0de33459-af05-4fa9-8064-c264467c998f
|
Author:qitang-CPaasrunOnly-High-76075-Forward logs to Cloudwatch using namespaceUUID and groupPrefix
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_cloudwatch.go
|
g.It("Author:qitang-CPaasrunOnly-High-76075-Forward logs to Cloudwatch using namespaceUUID and groupPrefix", func() {
g.By("init Cloudwatch test spec")
clfNS := oc.Namespace()
cw := cloudwatchSpec{
collectorSAName: "cloudwatch-" + getRandomString(),
secretNamespace: clfNS,
secretName: "logging-76075-" + getRandomString(),
groupName: "logging-76075-" + infraName + `.{.kubernetes.namespace_id||.log_type||"none-typed-logs"}`,
logTypes: []string{"infrastructure", "application", "audit"},
}
defer cw.deleteResources(oc)
cw.init(oc)
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
g.By("Create log producer")
oc.SetupProject()
appProj := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
uuid, err := oc.WithoutNamespace().Run("get").Args("project", appProj, "-ojsonpath={.metadata.uid}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
cw.selNamespacesID = []string{uuid}
if !cw.hasMaster {
nodeName, err := genLinuxAuditLogsOnWorker(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer deleteLinuxAuditPolicyFromNode(oc, nodeName)
}
g.By("Create clusterlogforwarder")
var template string
if cw.stsEnabled {
template = filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "cloudwatch-iamRole.yaml")
} else {
template = filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "cloudwatch-accessKey.yaml")
}
clf := clusterlogforwarder{
name: "clf-76075",
namespace: clfNS,
templateFile: template,
secretName: cw.secretName,
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: cw.collectorSAName,
}
defer clf.delete(oc)
clf.createServiceAccount(oc)
cw.createClfSecret(oc)
clf.create(oc, "REGION="+cw.awsRegion, "GROUP_NAME="+cw.groupName)
g.By("Check logs in Cloudwatch")
o.Expect(cw.checkLogGroupByNamespaceID()).To(o.BeTrue())
o.Expect(cw.infrastructureLogsFound(false)).To(o.BeTrue())
o.Expect(cw.auditLogsFound(false)).To(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
d9973a0d-5d8b-4436-a10b-bb3916cb43ba
|
CPaasrunOnly-Author:ikanse-High-61600-Collector External Cloudwatch output complies with the tlsSecurityProfile configuration.[Slow][Disruptive]
|
['"fmt"', '"path/filepath"', '"strings"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_cloudwatch.go
|
g.It("CPaasrunOnly-Author:ikanse-High-61600-Collector External Cloudwatch output complies with the tlsSecurityProfile configuration.[Slow][Disruptive]", func() {
g.By("init Cloudwatch test spec")
clfNS := oc.Namespace()
cw := cloudwatchSpec{
collectorSAName: "cloudwatch-" + getRandomString(),
secretNamespace: clfNS,
secretName: "logging-61600-" + getRandomString(),
groupName: "logging-61600-" + infraName + `.{.log_type||"none-typed-logs"}`,
logTypes: []string{"infrastructure", "application", "audit"},
}
defer cw.deleteResources(oc)
cw.init(oc)
g.By("Configure the global tlsSecurityProfile to use custom profile")
ogTLS, er := oc.AsAdmin().WithoutNamespace().Run("get").Args("apiserver/cluster", "-o", "jsonpath={.spec.tlsSecurityProfile}").Output()
o.Expect(er).NotTo(o.HaveOccurred())
if ogTLS == "" {
ogTLS = "null"
}
ogPatch := fmt.Sprintf(`[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": %s}]`, ogTLS)
defer func() {
oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", ogPatch).Execute()
waitForOperatorsRunning(oc)
}()
patch := `[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": {"custom":{"ciphers":["ECDHE-ECDSA-CHACHA20-POLY1305","ECDHE-RSA-CHACHA20-POLY1305","ECDHE-RSA-AES128-GCM-SHA256","ECDHE-ECDSA-AES128-GCM-SHA256"],"minTLSVersion":"VersionTLS12"},"type":"Custom"}}]`
er = oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", patch).Execute()
o.Expect(er).NotTo(o.HaveOccurred())
g.By("Make sure that all the Cluster Operators are in healthy state before progressing.")
waitForOperatorsRunning(oc)
g.By("create clusterlogforwarder")
var template string
if cw.stsEnabled {
template = filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "cloudwatch-iamRole.yaml")
} else {
template = filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "cloudwatch-accessKey.yaml")
}
clf := clusterlogforwarder{
name: "clf-61600",
namespace: clfNS,
templateFile: template,
secretName: cw.secretName,
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: cw.collectorSAName,
}
defer clf.delete(oc)
clf.createServiceAccount(oc)
cw.createClfSecret(oc)
clf.create(oc, "REGION="+cw.awsRegion, "GROUP_NAME="+cw.groupName)
nodes, err := clf.getCollectorNodeNames(oc)
o.Expect(err).NotTo(o.HaveOccurred())
cw.nodes = append(cw.nodes, nodes...)
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
g.By("Create log producer")
appProj1 := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj1, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
cw.selAppNamespaces = []string{appProj1}
if !cw.hasMaster {
nodeName, err := genLinuxAuditLogsOnWorker(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer deleteLinuxAuditPolicyFromNode(oc, nodeName)
}
g.By("The Cloudwatch sink in Vector config must use the Custom tlsSecurityProfile")
searchString := `[sinks.output_cloudwatch.tls]
min_tls_version = "VersionTLS12"
ciphersuites = "ECDHE-ECDSA-CHACHA20-POLY1305,ECDHE-RSA-CHACHA20-POLY1305,ECDHE-RSA-AES128-GCM-SHA256,ECDHE-ECDSA-AES128-GCM-SHA256"`
result, err := checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", searchString)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue(), "the configuration %s is not in vector.toml", searchString)
g.By("check logs in Cloudwatch")
logGroupName := "logging-61600-" + infraName + ".application"
o.Expect(cw.logsFound()).To(o.BeTrue())
filteredLogs, err := cw.getLogRecordsByNamespace(30, logGroupName, appProj1)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(filteredLogs) > 0).Should(o.BeTrue(), "Couldn't filter logs by namespace")
g.By("Set Intermediate tlsSecurityProfile for the Cloudwatch output.")
patch = `[{"op": "add", "path": "/spec/outputs/0/tls", "value": {"securityProfile": {"type": "Intermediate"}}}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
g.By("Create log producer")
oc.SetupProject()
appProj2 := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj2, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
cw.selAppNamespaces = []string{appProj2}
g.By("The Cloudwatch sink in Vector config must use the Intermediate tlsSecurityProfile")
searchString = `[sinks.output_cloudwatch.tls]
min_tls_version = "VersionTLS12"
ciphersuites = "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,ECDHE-ECDSA-AES128-GCM-SHA256,ECDHE-RSA-AES128-GCM-SHA256,ECDHE-ECDSA-AES256-GCM-SHA384,ECDHE-RSA-AES256-GCM-SHA384,ECDHE-ECDSA-CHACHA20-POLY1305,ECDHE-RSA-CHACHA20-POLY1305,DHE-RSA-AES128-GCM-SHA256,DHE-RSA-AES256-GCM-SHA384"`
result, err = checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", searchString)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue(), "the configuration %s is not in vector.toml", searchString)
g.By("Check for errors in collector pod logs")
e2e.Logf("Wait for a minute before the collector logs are generated.")
time.Sleep(60 * time.Second)
collectorLogs, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", clf.namespace, "--selector=app.kubernetes.io/component=collector").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(collectorLogs, "Error trying to connect")).ShouldNot(o.BeTrue(), "Unable to connect to the external Cloudwatch server.")
g.By("check logs in Cloudwatch")
o.Expect(cw.logsFound()).To(o.BeTrue())
filteredLogs, err = cw.getLogRecordsByNamespace(30, logGroupName, appProj2)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(filteredLogs) > 0).Should(o.BeTrue(), "Couldn't filter logs by namespace")
})
| |||||
test case
|
openshift/openshift-tests-private
|
4186044a-bf74-4804-b9e5-f96118128a1c
|
CPaasrunOnly-Author:qitang-Medium-71778-Collect or exclude logs by matching pod labels and namespaces.[Slow]
|
['"path/filepath"', '"strconv"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_cloudwatch.go
|
g.It("CPaasrunOnly-Author:qitang-Medium-71778-Collect or exclude logs by matching pod labels and namespaces.[Slow]", func() {
g.By("init Cloudwatch test spec")
clfNS := oc.Namespace()
cw := cloudwatchSpec{
collectorSAName: "cloudwatch-" + getRandomString(),
secretNamespace: clfNS,
secretName: "logging-71778-" + getRandomString(),
groupName: "logging-71778-" + infraName + `.{.log_type||"none-typed-logs"}`,
logTypes: []string{"application"},
}
defer cw.deleteResources(oc)
cw.init(oc)
exutil.By("Create projects for app logs and deploy the log generators")
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
oc.SetupProject()
appNS1 := oc.Namespace()
err := oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", appNS1, "-p", "LABELS={\"test\": \"logging-71778\", \"test.logging.io/logging.qe-test-label\": \"logging-71778-test\"}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
var namespaces []string
for i := 0; i < 3; i++ {
ns := "logging-project-71778-" + strconv.Itoa(i) + "-" + getRandomString()
defer oc.DeleteSpecifiedNamespaceAsAdmin(ns)
oc.CreateSpecifiedNamespaceAsAdmin(ns)
namespaces = append(namespaces, ns)
}
err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", namespaces[0], "-p", "LABELS={\"test.logging-71778\": \"logging-71778\", \"test.logging.io/logging.qe-test-label\": \"logging-71778-test\"}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", namespaces[1], "-p", "LABELS={\"test.logging.io/logging.qe-test-label\": \"logging-71778-test\"}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", namespaces[2], "-p", "LABELS={\"test\": \"logging-71778\"}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create clusterlogforwarder")
var template string
if cw.stsEnabled {
template = filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "cloudwatch-iamRole.yaml")
} else {
template = filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "cloudwatch-accessKey.yaml")
}
clf := clusterlogforwarder{
name: "clf-71778",
namespace: clfNS,
templateFile: template,
secretName: cw.secretName,
collectApplicationLogs: true,
serviceAccountName: cw.collectorSAName,
}
defer clf.delete(oc)
clf.createServiceAccount(oc)
cw.createClfSecret(oc)
clf.create(oc, "REGION="+cw.awsRegion, "GROUP_NAME="+cw.groupName, "INPUT_REFS=[\"application\"]")
patch := `[{"op": "add", "path": "/spec/inputs", "value": [{"name": "myapplogdata", "type": "application", "application": {"selector": {"matchLabels": {"test.logging.io/logging.qe-test-label": "logging-71778-test"}}}}]}, {"op": "replace", "path": "/spec/pipelines/0/inputRefs", "value": ["myapplogdata"]}]`
clf.update(oc, "", patch, "--type=json")
clf.waitForCollectorPodsReady(oc)
exutil.By("Check logs in Cloudwatch")
cw.selAppNamespaces = []string{namespaces[0], namespaces[1], appNS1}
cw.disAppNamespaces = []string{namespaces[2]}
o.Expect(cw.logsFound()).To(o.BeTrue())
exutil.By("Update CLF to combine label selector and namespace selector")
patch = `[{"op": "add", "path": "/spec/inputs/0/application/includes", "value": [{"namespace": "*71778*"}]}, {"op": "add", "path": "/spec/inputs/0/application/excludes", "value": [{"namespace": "` + namespaces[1] + `"}]}]`
clf.update(oc, "", patch, "--type=json")
clf.waitForCollectorPodsReady(oc)
//sleep 10 seconds to wait for the caches in collectors to be cleared
time.Sleep(10 * time.Second)
exutil.By("Check logs in Cloudwatch")
newGroupName := "new-logging-71778-" + infraName
clf.update(oc, "", `[{"op": "replace", "path": "/spec/outputs/0/cloudwatch/groupName", "value": "`+newGroupName+`"}]`, "--type=json")
clf.waitForCollectorPodsReady(oc)
defer cw.deleteGroups("logging-71778-" + infraName)
cw.setGroupName(newGroupName)
cw.selAppNamespaces = []string{namespaces[0]}
cw.disAppNamespaces = []string{namespaces[1], namespaces[2], appNS1}
o.Expect(cw.logsFound()).To(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
300ddadd-c24c-40e2-b618-08cf1cab04bc
|
CPaasrunOnly-Author:qitang-High-71488-Collect container logs from infrastructure projects in an application input.
|
['"path/filepath"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_cloudwatch.go
|
g.It("CPaasrunOnly-Author:qitang-High-71488-Collect container logs from infrastructure projects in an application input.", func() {
g.By("init Cloudwatch test spec")
clfNS := oc.Namespace()
cw := cloudwatchSpec{
collectorSAName: "clf-71488",
secretName: "clf-71488",
secretNamespace: clfNS,
groupName: "logging-71488-" + infraName + `.{.log_type||"none-typed-logs"}`,
logTypes: []string{"infrastructure"},
}
defer cw.deleteResources(oc)
cw.init(oc)
exutil.By("Create clusterlogforwarder")
var template string
if cw.stsEnabled {
template = filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "cloudwatch-iamRole.yaml")
} else {
template = filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "cloudwatch-accessKey.yaml")
}
clf := clusterlogforwarder{
name: "clf-71488",
namespace: clfNS,
templateFile: template,
secretName: cw.secretName,
collectApplicationLogs: true,
serviceAccountName: cw.collectorSAName,
}
defer clf.delete(oc)
clf.createServiceAccount(oc)
cw.createClfSecret(oc)
clf.create(oc, "REGION="+cw.awsRegion, "GROUP_NAME="+cw.groupName, "INPUT_REFS=[\"application\"]")
exutil.By("Update CLF to add infra projects to application logs")
patch := `[{"op": "add", "path": "/spec/inputs", "value": [{"name": "new-app", "type": "application", "application": {"includes": [{"namespace": "openshift*"}]}}]}, {"op": "replace", "path": "/spec/pipelines/0/inputRefs", "value": ["new-app"]}]`
clf.update(oc, "", patch, "--type=json")
exutil.By("CLF should be rejected as the serviceaccount doesn't have sufficient permissions")
checkResource(oc, true, false, `insufficient permissions on service account, not authorized to collect ["infrastructure"] logs`, []string{"clusterlogforwarder.observability.openshift.io", clf.name, "-n", clf.namespace, "-ojsonpath={.status.conditions[*].message}"})
exutil.By("Add cluster-role/collect-infrastructure-logs to the serviceaccount")
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "collect-infrastructure-logs")
err := addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "collect-infrastructure-logs")
o.Expect(err).NotTo(o.HaveOccurred())
//sleep 2 minutes for CLO to update the CLF
time.Sleep(2 * time.Minute)
checkResource(oc, false, false, `insufficient permissions on service account, not authorized to collect ["infrastructure"] logs`, []string{"clusterlogforwarder.observability.openshift.io", clf.name, "-n", clf.namespace, "-ojsonpath={.status.conditions[*].message}"})
clf.waitForCollectorPodsReady(oc)
exutil.By("Check logs in Cloudwatch, should find some logs from openshift* projects")
o.Expect(cw.checkInfraContainerLogs(false)).To(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
9c060450-341f-436f-a8c9-1bae030f2b11
|
Author:qitang-CPaasrunOnly-Medium-75415-Validation for multiple cloudwatch outputs in iamRole mode.[Slow]
|
['"context"', '"path/filepath"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_cloudwatch.go
|
g.It("Author:qitang-CPaasrunOnly-Medium-75415-Validation for multiple cloudwatch outputs in iamRole mode.[Slow]", func() {
if !exutil.IsSTSCluster(oc) {
g.Skip("Skip for the cluster doesn't have STS.")
}
g.By("init Cloudwatch test spec")
clfNS := oc.Namespace()
cw := cloudwatchSpec{
collectorSAName: "clf-75415",
secretName: "clf-75415",
secretNamespace: clfNS,
groupName: "logging-75415-" + infraName + `.{.log_type||"none-typed-logs"}`,
logTypes: []string{"application"},
}
defer cw.deleteResources(oc)
cw.init(oc)
fakeCW := cloudwatchSpec{
collectorSAName: "clf-75415",
secretName: "clf-75415-fake",
secretNamespace: clfNS,
groupName: "logging-75415-" + infraName + "-logs",
logTypes: []string{"application"},
}
defer fakeCW.deleteResources(oc)
fakeCW.init(oc)
staticCW := cloudwatchSpec{
collectorSAName: "clf-75415",
secretName: "static-cred",
secretNamespace: clfNS,
groupName: "logging-75415-" + infraName + `-static-cred-logs`,
logTypes: []string{"application"},
}
defer staticCW.deleteResources(oc)
staticCW.init(oc)
exutil.By("Create clusterlogforwarder")
clf := clusterlogforwarder{
name: "clf-75415",
namespace: clfNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "cloudwatch-iamRole.yaml"),
secretName: cw.secretName,
collectApplicationLogs: true,
serviceAccountName: cw.collectorSAName,
}
defer clf.delete(oc)
clf.createServiceAccount(oc)
cw.createClfSecret(oc)
clf.create(oc, "REGION="+cw.awsRegion, "GROUP_NAME="+cw.groupName, "INPUT_REFS=[\"application\"]")
exutil.By("add one output to use the same credentials as the first output")
patch := `[{"op": "add", "path": "/spec/outputs/-", "value": {"name": "cloudwatch-2", "type": "cloudwatch", "cloudwatch": {"authentication": {"type": "iamRole", "iamRole": {"token": {"from": "serviceAccount"}, roleARN: {"key": "role_arn", "secretName": "` + cw.secretName + `"}}}, "groupName": "` + fakeCW.groupName + `", "region": "` + fakeCW.awsRegion + `"}}},{"op": "add", "path": "/spec/pipelines/0/outputRefs/-", "value": "cloudwatch-2"}]`
clf.update(oc, "", patch, "--type=json")
clf.waitForCollectorPodsReady(oc)
exutil.By("Create log producer")
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("collector pods should send logs to these 2 outputs")
o.Expect(cw.logsFound() && fakeCW.logsFound()).Should(o.BeTrue())
exutil.By("add one output to use static credentials")
secret := resource{"secret", staticCW.secretName, clfNS}
defer secret.clear(oc)
//get static credentials from the default AWS SDK credential chain
cfg := readDefaultSDKExternalConfigurations(context.TODO(), cw.awsRegion)
cred, _ := cfg.Credentials.Retrieve(context.TODO())
err = oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", secret.name, "--from-literal=aws_access_key_id="+cred.AccessKeyID, "--from-literal=aws_secret_access_key="+cred.SecretAccessKey, "-n", secret.namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
patch = `[{"op": "add", "path": "/spec/outputs/-", "value": {"name": "cloudwatch-3", "type": "cloudwatch", "cloudwatch": {"groupName": "` + staticCW.groupName + `", "region": "` + staticCW.awsRegion + `", "authentication": {"type": "awsAccessKey", "awsAccessKey": {"keyId": {"key": "aws_access_key_id", "secretName": "static-cred"}, "keySecret": {"key": "aws_secret_access_key", "secretName": "static-cred"}}}}}}, {"op": "add", "path": "/spec/pipelines/0/outputRefs/-", "value": "cloudwatch-3"}]`
clf.update(oc, "", patch, "--type=json")
// wait for 10 seconds for collector pods to load new config
time.Sleep(10 * time.Second)
clf.waitForCollectorPodsReady(oc)
cw.deleteGroups("")
fakeCW.deleteGroups("")
staticCW.deleteGroups("")
exutil.By("collector pods should send logs to these 3 outputs")
o.Expect(cw.logsFound() && fakeCW.logsFound() && staticCW.logsFound()).Should(o.BeTrue())
exutil.By("update the second output to use another role_arn")
patch = `[{"op": "replace", "path": "/spec/outputs/1/cloudwatch/authentication/iamRole/roleARN/secretName", "value": "` + fakeCW.secretName + `"}]`
clf.update(oc, "", patch, "--type=json")
checkResource(oc, true, false, "Found multiple different CloudWatch RoleARN authorizations in the outputs spec", []string{"clusterlogforwarder.observability.openshift.io", clf.name, "-n", clf.namespace, "-ojsonpath={.status.outputConditions[*].message}"})
})
| |||||
test case
|
openshift/openshift-tests-private
|
4d096084-48cf-46a9-813f-ca5cbef69a92
|
Author:qitang-CPaasrunOnly-Medium-75417-Validation for multiple CloudWatch outputs in awsAccessKey mode.
|
['"path/filepath"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_cloudwatch.go
|
g.It("Author:qitang-CPaasrunOnly-Medium-75417-Validation for multiple CloudWatch outputs in awsAccessKey mode.", func() {
if exutil.IsSTSCluster(oc) {
g.Skip("Skip for the cluster have STS enabled.")
}
g.By("init Cloudwatch test spec")
clfNS := oc.Namespace()
cw := cloudwatchSpec{
collectorSAName: "clf-75417",
secretName: "clf-75417",
secretNamespace: clfNS,
groupName: "logging-75417-" + infraName + `.{.log_type||"none-typed-logs"}`,
logTypes: []string{"application"},
}
defer cw.deleteResources(oc)
cw.init(oc)
fakeCW := cloudwatchSpec{
collectorSAName: "clf-75417",
secretName: "clf-75417-fake",
secretNamespace: clfNS,
groupName: "logging-75417-" + infraName + "-logs",
logTypes: []string{"application"},
}
defer fakeCW.deleteResources(oc)
fakeCW.init(oc)
exutil.By("Create clusterlogforwarder")
clf := clusterlogforwarder{
name: "clf-75417",
namespace: clfNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "cloudwatch-accessKey.yaml"),
secretName: cw.secretName,
collectApplicationLogs: true,
waitForPodReady: true,
serviceAccountName: cw.collectorSAName,
}
defer clf.delete(oc)
clf.createServiceAccount(oc)
cw.createClfSecret(oc)
clf.create(oc, "REGION="+cw.awsRegion, "GROUP_NAME="+cw.groupName, "INPUT_REFS=[\"application\"]")
exutil.By("add one output to the CLF with same same secret")
patch := `[{"op": "add", "path": "/spec/outputs/-", "value": {"name": "new-cloudwatch-2", "type": "cloudwatch", "cloudwatch": {"authentication": {"type": "awsAccessKey", "awsAccessKey": {"keyId": {"key": "aws_access_key_id", "secretName": "` + cw.secretName + `"}, "keySecret": {"key": "aws_secret_access_key", "secretName": "` + cw.secretName + `"}}}, "groupName": "` + fakeCW.groupName + `", "region": "` + fakeCW.awsRegion + `"}}},{"op": "add", "path": "/spec/pipelines/0/outputRefs/-", "value": "new-cloudwatch-2"}]`
clf.update(oc, "", patch, "--type=json")
clf.waitForCollectorPodsReady(oc)
exutil.By("Create log producer")
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(cw.logsFound() && fakeCW.logsFound()).Should(o.BeTrue())
exutil.By("update one of the output to use another secret")
//since we can't get another AWS key pair, add a secret with a fake aws_access_key_id and aws_secret_access_key
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", "-n", clf.namespace, fakeCW.secretName, "--from-literal=aws_access_key_id="+getRandomString(), "--from-literal=aws_secret_access_key="+getRandomString()).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
patch = `[{"op": "replace", "path": "/spec/outputs/0/cloudwatch/authentication/awsAccessKey/keyId/secretName", "value": "` + fakeCW.secretName + `"}, {"op": "replace", "path": "/spec/outputs/0/cloudwatch/authentication/awsAccessKey/keySecret/secretName", "value": "` + fakeCW.secretName + `"}]`
clf.update(oc, "", patch, "--type=json")
//sleep 10 seconds for collector pods to load new credentials
time.Sleep(10 * time.Second)
clf.waitForCollectorPodsReady(oc)
cw.deleteGroups("")
fakeCW.deleteGroups("")
//ensure collector pods still can forward logs to cloudwatch with correct credentials
o.Expect(cw.logsFound() || fakeCW.logsFound()).Should(o.BeTrue())
})
| |||||
test
|
openshift/openshift-tests-private
|
ff6d7ee8-f33d-4abd-9eae-3296e71b25f7
|
vector_google_cloud_logging
|
import (
"context"
"fmt"
"path/filepath"
"strconv"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
)
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_google_cloud_logging.go
|
package logging
import (
"context"
"fmt"
"path/filepath"
"strconv"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
)
var _ = g.Describe("[sig-openshift-logging] Logging NonPreRelease", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("vector-to-google-cloud-logging", exutil.KubeConfigPath())
loggingBaseDir string
)
g.BeforeEach(func() {
platform := exutil.CheckPlatform(oc)
if platform != "gcp" {
g.Skip("Skip for non-supported platform, the supported platform is GCP!!!")
}
loggingBaseDir = exutil.FixturePath("testdata", "logging")
g.By("deploy CLO")
subTemplate := filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml")
CLO := SubscriptionObjects{
OperatorName: "cluster-logging-operator",
Namespace: cloNS,
PackageName: "cluster-logging",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
CLO.SubscribeOperator(oc)
oc.SetupProject()
})
//author [email protected]
g.It("Author:qitang-CPaasrunOnly-High-53731-Forward logs to GCL using different logName for each log type and using Service Account authentication.", func() {
g.By("Create log producer")
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
projectID, err := exutil.GetGcpProjectID(oc)
o.Expect(err).NotTo(o.HaveOccurred())
logName := getInfrastructureName(oc) + "-53731"
logTypes := []string{"infrastructure", "audit", "application"}
for _, logType := range logTypes {
defer googleCloudLogging{projectID: projectID, logName: logName + "-" + logType}.removeLogs()
}
oc.SetupProject()
clfNS := oc.Namespace()
gcpSecret := resource{"secret", "gcp-secret-53731", clfNS}
defer gcpSecret.clear(oc)
err = createSecretForGCL(oc, gcpSecret.name, gcpSecret.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
clf := clusterlogforwarder{
name: "clf-53731",
namespace: clfNS,
secretName: gcpSecret.name,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "google-cloud-logging-multi-logids.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "ID_TYPE=project", "ID_VALUE="+projectID, "LOG_ID="+logName)
for _, logType := range logTypes {
gcl := googleCloudLogging{
projectID: projectID,
logName: logName + "-" + logType,
}
err = wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
logs, err := gcl.getLogByType(logType)
if err != nil {
return false, err
}
return len(logs) > 0, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("%s logs are not found", logType))
}
})
//author [email protected]
g.It("CPaasrunOnly-Author:qitang-High-71003-Collect or exclude logs by matching pod expressions[Slow]", func() {
clfNS := oc.Namespace()
projectID, err := exutil.GetGcpProjectID(oc)
o.Expect(err).NotTo(o.HaveOccurred())
gcl := googleCloudLogging{
projectID: projectID,
logName: getInfrastructureName(oc) + "-71003",
}
defer gcl.removeLogs()
gcpSecret := resource{"secret", "gcp-secret-71003", clfNS}
defer gcpSecret.clear(oc)
err = createSecretForGCL(oc, gcpSecret.name, gcpSecret.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
clf := clusterlogforwarder{
name: "clf-71003",
namespace: clfNS,
secretName: gcpSecret.name,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "googleCloudLogging.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "ID_TYPE=project", "ID_VALUE="+gcl.projectID, "LOG_ID="+gcl.logName, "INPUT_REFS=[\"application\"]")
patch := `[{"op": "add", "path": "/spec/inputs", "value": [{"name": "new-app","type": "application","application": {"selector": {"matchExpressions": [{"key": "test.logging.io/logging.qe-test-label", "operator": "In", "values": ["logging-71003-test-0", "logging-71003-test-1", "logging-71003-test-2"]},{"key":"test", "operator":"Exists"}]}}}]}, {"op": "replace", "path": "/spec/pipelines/0/inputRefs", "value": ["new-app"]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
exutil.By("Create project for app logs and deploy the log generator")
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
var namespaces []string
for i := 0; i < 4; i++ {
ns := "logging-project-71003-" + strconv.Itoa(i)
defer oc.DeleteSpecifiedNamespaceAsAdmin(ns)
oc.CreateSpecifiedNamespaceAsAdmin(ns)
namespaces = append(namespaces, ns)
}
err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", namespaces[0], "-p", "LABELS={\"test\": \"logging-71003-0\", \"test.logging.io/logging.qe-test-label\": \"logging-71003-test-0\"}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", namespaces[1], "-p", "LABELS={\"test.logging-71003\": \"logging-71003-1\", \"test.logging.io/logging.qe-test-label\": \"logging-71003-test-1\"}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", namespaces[2], "-p", "LABELS={\"test.logging.io/logging.qe-test-label\": \"logging-71003-test-2\"}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", namespaces[3], "-p", "LABELS={\"test\": \"logging-71003-3\"}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check data in google cloud logging, only logs from project namespaces[0] should be collected")
err = gcl.waitForLogsAppearByType("application")
exutil.AssertWaitPollNoErr(err, "application logs are not collected")
appLogs1, err := gcl.getLogByNamespace(namespaces[0])
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(appLogs1) > 0).Should(o.BeTrue())
for i := 1; i < 4; i++ {
appLogs, err := gcl.getLogByNamespace(namespaces[i])
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(appLogs) == 0).Should(o.BeTrue())
}
exutil.By("Update CLF, change the matchExpressions")
patch = `[{"op": "replace", "path": "/spec/inputs/0/application/selector/matchExpressions", "value": [{"key": "test.logging.io/logging.qe-test-label", "operator": "In", "values": ["logging-71003-test-0", "logging-71003-test-1", "logging-71003-test-2"]},{"key":"test", "operator":"DoesNotExist"}]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
gcl.removeLogs()
exutil.By("Check data in google cloud logging, logs from project namespaces[1] and namespaces[2] should be collected")
err = gcl.waitForLogsAppearByNamespace(namespaces[1])
exutil.AssertWaitPollNoErr(err, "can't find logs from project "+namespaces[1])
err = gcl.waitForLogsAppearByNamespace(namespaces[2])
exutil.AssertWaitPollNoErr(err, "can't find logs from project "+namespaces[2])
for _, ns := range []string{namespaces[0], namespaces[3]} {
appLogs, err := gcl.getLogByNamespace(ns)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(appLogs) == 0).Should(o.BeTrue(), "find logs from project "+ns+", this is not expected")
}
exutil.By("Update CLF, change the matchExpressions")
patch = `[{"op": "replace", "path": "/spec/inputs/0/application/selector/matchExpressions", "value": [{"key": "test.logging.io/logging.qe-test-label", "operator": "NotIn", "values": ["logging-71003-test-0", "logging-71003-test-1", "logging-71003-test-2"]},{"key":"test", "operator":"Exists"}]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
gcl.removeLogs()
exutil.By("Check data in google cloud logging, logs from project namespaces[3] should be collected")
err = gcl.waitForLogsAppearByNamespace(namespaces[3])
exutil.AssertWaitPollNoErr(err, "can't find logs from project "+namespaces[3])
for i := 0; i < 3; i++ {
appLogs, err := gcl.getLogByNamespace(namespaces[i])
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(appLogs) == 0).Should(o.BeTrue(), "find logs from project "+namespaces[i]+", this is not expected")
}
})
g.It("CPaasrunOnly-Author:ikanse-High-61602-Collector external Google Cloud logging complies with the tlsSecurityProfile configuration. [Slow][Disruptive]", func() {
g.By("Configure the global tlsSecurityProfile to use Intermediate profile")
ogTLS, er := oc.AsAdmin().WithoutNamespace().Run("get").Args("apiserver/cluster", "-o", "jsonpath={.spec.tlsSecurityProfile}").Output()
o.Expect(er).NotTo(o.HaveOccurred())
if ogTLS == "" {
ogTLS = "null"
}
ogPatch := fmt.Sprintf(`[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": %s}]`, ogTLS)
defer func() {
oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", ogPatch).Execute()
waitForOperatorsRunning(oc)
}()
patch := `[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": {"intermediate":{},"type":"Intermediate"}}]`
er = oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", patch).Execute()
o.Expect(er).NotTo(o.HaveOccurred())
g.By("Make sure that all the Cluster Operators are in healthy state before progressing.")
waitForOperatorsRunning(oc)
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
g.By("Create log producer")
appProj1 := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj1, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
clfNS := oc.Namespace()
projectID, err := exutil.GetGcpProjectID(oc)
o.Expect(err).NotTo(o.HaveOccurred())
gcl := googleCloudLogging{
projectID: projectID,
logName: getInfrastructureName(oc) + "-61602",
}
defer gcl.removeLogs()
gcpSecret := resource{"secret", "gcp-secret-61602", clfNS}
defer gcpSecret.clear(oc)
err = createSecretForGCL(oc, gcpSecret.name, gcpSecret.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
clf := clusterlogforwarder{
name: "clf-61602",
namespace: clfNS,
secretName: gcpSecret.name,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "googleCloudLogging.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "ID_TYPE=project", "ID_VALUE="+gcl.projectID, "LOG_ID="+gcl.logName, "INPUT_REFS=[\"application\"]")
g.By("The Google Cloud sink in Vector config must use the intermediate tlsSecurityProfile")
searchString := `[sinks.output_gcp_logging.tls]
min_tls_version = "VersionTLS12"
ciphersuites = "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,ECDHE-ECDSA-AES128-GCM-SHA256,ECDHE-RSA-AES128-GCM-SHA256,ECDHE-ECDSA-AES256-GCM-SHA384,ECDHE-RSA-AES256-GCM-SHA384,ECDHE-ECDSA-CHACHA20-POLY1305,ECDHE-RSA-CHACHA20-POLY1305,DHE-RSA-AES128-GCM-SHA256,DHE-RSA-AES256-GCM-SHA384"`
result, err := checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", searchString)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue(), "the configuration %s is not in vector.toml", searchString)
err = wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
logs, err := gcl.getLogByType("application")
if err != nil {
return false, err
}
return len(logs) > 0, nil
})
exutil.AssertWaitPollNoErr(err, "application logs are not found")
appLogs1, err := gcl.getLogByNamespace(appProj1)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(appLogs1) > 0).Should(o.BeTrue())
g.By("Set Modern tlsSecurityProfile for the External Google Cloud logging output.")
patch = `[{"op": "add", "path": "/spec/outputs/0/tls", "value": {"securityProfile": {"type": "Modern"}}}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
g.By("Delete logs from Google Cloud Logging")
err = gcl.removeLogs()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("The Google Cloud sink in Vector config must use the Modern tlsSecurityProfile")
searchString = `[sinks.output_gcp_logging.tls]
min_tls_version = "VersionTLS13"
ciphersuites = "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256"`
result, err = checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", searchString)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue(), "the configuration %s is not in vector.toml", searchString)
err = wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
logs, err := gcl.getLogByType("application")
if err != nil {
return false, err
}
return len(logs) > 0, nil
})
exutil.AssertWaitPollNoErr(err, "application logs are not found")
appLogs1, err = gcl.getLogByNamespace(appProj1)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(appLogs1) > 0).Should(o.BeTrue())
})
//author [email protected]
g.It("CPaasrunOnly-Author:qitang-Medium-71777-Include or exclude logs by combining namespace and container selectors.[Slow]", func() {
clfNS := oc.Namespace()
projectID, err := exutil.GetGcpProjectID(oc)
o.Expect(err).NotTo(o.HaveOccurred())
gcl := googleCloudLogging{
projectID: projectID,
logName: getInfrastructureName(oc) + "-71777",
}
defer gcl.removeLogs()
gcpSecret := resource{"secret", "gcp-secret-71777", clfNS}
defer gcpSecret.clear(oc)
err = createSecretForGCL(oc, gcpSecret.name, gcpSecret.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
clf := clusterlogforwarder{
name: "clf-71777",
namespace: clfNS,
secretName: gcpSecret.name,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "googleCloudLogging.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "ID_TYPE=project", "ID_VALUE="+gcl.projectID, "LOG_ID="+gcl.logName, "INPUT_REFS=[\"application\"]")
exutil.By("exclude logs from specific container in specific namespace")
patch := `[{"op": "add", "path": "/spec/inputs", "value": [{"name": "new-app","type": "application" ,"application": {"excludes": [{"namespace": "logging-log-71777", "container": "exclude-log-71777"}]}}]}, {"op": "replace", "path": "/spec/pipelines/0/inputRefs", "value": ["new-app"]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
exutil.By("Create project for app logs and deploy the log generator")
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
namespaces := []string{"logging-log-71777", "logging-data-71777", "e2e-test-log-71777"}
containerNames := []string{"log-71777-include", "exclude-log-71777"}
for _, ns := range namespaces {
defer oc.DeleteSpecifiedNamespaceAsAdmin(ns)
oc.CreateSpecifiedNamespaceAsAdmin(ns)
for _, container := range containerNames {
err := oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", ns, "-p", "CONTAINER="+container, "-p", "CONFIGMAP="+container, "-p", "REPLICATIONCONTROLLER="+container).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
}
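// generateQuery builds a Google Cloud Logging filter fragment that matches entries by namespace and container name.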
generateQuery := func(ns, container string) string {
return fmt.Sprintf(` AND jsonPayload.kubernetes.namespace_name="%s" AND jsonPayload.kubernetes.container_name="%s"`, ns, container)
}
exutil.By("Check data in google cloud logging, logs from container/exclude-log-71777 in project/logging-log-71777 shouldn't be collected")
err = gcl.waitForLogsAppearByType("application")
exutil.AssertWaitPollNoErr(err, "application logs are not collected")
for _, ns := range []string{"logging-data-71777", "e2e-test-log-71777"} {
for _, container := range containerNames {
appLogs, err := gcl.listLogEntries(generateQuery(ns, container))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(appLogs) > 0).Should(o.BeTrue(), "can't find logs from container "+container+" in project "+ns+", this is not expected")
}
}
appLogs, err := gcl.listLogEntries(generateQuery("logging-log-71777", "log-71777-include"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(appLogs) > 0).Should(o.BeTrue(), "can't find logs from container/log-71777-include in project/logging-log-71777, this is not expected")
appLogs, err = gcl.listLogEntries(generateQuery("logging-log-71777", "exclude-log-71777"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(appLogs) == 0).Should(o.BeTrue(), "find logs from container/exclude-log-71777 in project/logging-log-71777, this is not expected")
exutil.By("exclude logs from specific containers in all namespaces")
patch = `[{"op": "replace", "path": "/spec/inputs/0/application/excludes", "value": [{"namespace": "*", "container": "exclude-log-71777"}]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
gcl.removeLogs()
exutil.By("Check data in google cloud logging, no logs from container/exclude-log-71777")
err = gcl.waitForLogsAppearByType("application")
exutil.AssertWaitPollNoErr(err, "application logs are not collected")
for _, ns := range namespaces {
err = gcl.waitForLogsAppearByNamespace(ns)
exutil.AssertWaitPollNoErr(err, "can't find logs from project "+ns)
}
logs, err := gcl.listLogEntries(` AND jsonPayload.kubernetes.container_name="exclude-log-71777"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(logs) == 0).Should(o.BeTrue(), "find logs from container exclude-log-71777, this is not expected")
exutil.By("exclude logs from all containers in specific namespaces")
patch = `[{"op": "replace", "path": "/spec/inputs/0/application/excludes", "value": [{"namespace": "e2e-test-log-71777", "container": "*"}]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
gcl.removeLogs()
exutil.By("Check data in google cloud logging, no logs from project/e2e-test-log-71777")
err = gcl.waitForLogsAppearByType("application")
exutil.AssertWaitPollNoErr(err, "application logs are not collected")
for _, ns := range []string{"logging-log-71777", "logging-data-71777"} {
for _, container := range containerNames {
appLogs, err := gcl.listLogEntries(generateQuery(ns, container))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(appLogs) > 0).Should(o.BeTrue(), "can't find logs from container "+container+" in project "+ns+", this is not expected")
}
}
logs, err = gcl.getLogByNamespace("e2e-test-log-71777")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(logs) == 0).Should(o.BeTrue(), "find logs from project e2e-test-log-71777, this is not expected")
exutil.By("Update CLF to collect logs from specific containers in specific namespaces")
patch = `[{"op": "remove", "path": "/spec/inputs/0/application/excludes"}, {"op": "add", "path": "/spec/inputs/0/application", "value": {"includes": [{"namespace": "logging-log-71777", "container": "log-71777-include"}]}}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
gcl.removeLogs()
exutil.By("Check data in google cloud logging, only logs from container log-71777-include in project logging-log-71777 should be collected")
err = gcl.waitForLogsAppearByNamespace("logging-log-71777")
exutil.AssertWaitPollNoErr(err, "logs from project logging-log-71777 are not collected")
includeLogs, err := gcl.listLogEntries(generateQuery("logging-log-71777", "log-71777-include"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(includeLogs) > 0).Should(o.BeTrue(), "can't find logs from container log-71777-include in project logging-log-71777, this is not expected")
excludeLogs, err := gcl.listLogEntries(` AND jsonPayload.kubernetes.namespace_name!="logging-log-71777" OR jsonPayload.kubernetes.container_name!="log-71777-include"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(excludeLogs) == 0).Should(o.BeTrue(), "find logs from other containers, this is not expected")
exutil.By("collect logs from specific containers in all namespaces")
patch = `[{"op": "replace", "path": "/spec/inputs/0/application/includes", "value": [{"namespace": "*", "container": "log-71777-include"}]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
gcl.removeLogs()
exutil.By("Check data in google cloud logging, only logs from container/log-71777-include should be collected")
err = gcl.waitForLogsAppearByType("application")
exutil.AssertWaitPollNoErr(err, "application logs are not collected")
for _, ns := range namespaces {
err = gcl.waitForLogsAppearByNamespace(ns)
exutil.AssertWaitPollNoErr(err, "can't find logs from project "+ns)
}
logs, err = gcl.listLogEntries(` AND jsonPayload.kubernetes.container_name != "log-71777-include"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(logs) == 0).Should(o.BeTrue(), "find logs from other containers, this is not expected")
// no logs from openshift* projects
exutil.By("collect logs from all containers in specific namespaces")
patch = `[{"op": "replace", "path": "/spec/inputs/0/application/includes", "value": [{"namespace": "logging-data-71777", "container": "*"}]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
gcl.removeLogs()
exutil.By("Check data in google cloud logging, only logs from project/logging-data-71777 should be collected")
err = gcl.waitForLogsAppearByType("application")
exutil.AssertWaitPollNoErr(err, "application logs are not collected")
logs, err = gcl.listLogEntries(` AND jsonPayload.kubernetes.namespace_name != "logging-data-71777"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(logs) == 0).Should(o.BeTrue(), "find logs from other projects, this is not expected")
logs, err = gcl.getLogByNamespace("logging-data-71777")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(logs) > 0).Should(o.BeTrue(), "can't find logs from project logging-data-71777, this is not expected")
exutil.By("combine includes and excludes")
patch = `[{"op": "replace", "path": "/spec/inputs/0/application", "value": {"includes": [{"namespace": "*log*", "container": "log-71777*"}], "excludes": [{"namespace": "logging*71777", "container": "log-71777-include"}]}}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
gcl.removeLogs()
exutil.By("Check data in google cloud logging, only logs from container/log-71777-include in project/e2e-test-log-71777 should be collected")
err = gcl.waitForLogsAppearByType("application")
exutil.AssertWaitPollNoErr(err, "application logs are not collected")
logs, err = gcl.listLogEntries(generateQuery("e2e-test-log-71777", "log-71777-include"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(logs) > 0).Should(o.BeTrue(), "can't find logs from container log-71777-include in project e2e-test-log-71777, this is not expected")
logs, err = gcl.listLogEntries(` AND jsonPayload.kubernetes.namespace_name!="e2e-test-log-71777" OR jsonPayload.kubernetes.container_name!="log-71777-include"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(logs) == 0).Should(o.BeTrue(), "find logs from other containers, this is not expected")
})
// author [email protected]
g.It("CPaasrunOnly-ConnectedOnly-Author:qitang-Medium-71753-Prune fields from log messages", func() {
exutil.By("Create CLF")
clfNS := oc.Namespace()
projectID, err := exutil.GetGcpProjectID(oc)
o.Expect(err).NotTo(o.HaveOccurred())
gcl := googleCloudLogging{
projectID: projectID,
logName: getInfrastructureName(oc) + "-71753",
}
defer gcl.removeLogs()
gcpSecret := resource{"secret", "gcp-secret-71753", clfNS}
defer gcpSecret.clear(oc)
err = createSecretForGCL(oc, gcpSecret.name, gcpSecret.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
clf := clusterlogforwarder{
name: "clf-71753",
namespace: clfNS,
secretName: gcpSecret.name,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "googleCloudLogging.yaml"),
collectApplicationLogs: true,
collectInfrastructureLogs: true,
collectAuditLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "ID_TYPE=project", "ID_VALUE="+gcl.projectID, "LOG_ID="+gcl.logName)
exutil.By("Add prune filters to CLF")
patch := `[{"op": "add", "path": "/spec/filters", "value": [{"name": "prune-logs", "type": "prune", "prune": {"in": [".kubernetes.namespace_name",".kubernetes.labels.\"test.logging.io/logging.qe-test-label\"",".file",".kubernetes.annotations"]}}]},
{"op": "add", "path": "/spec/pipelines/0/filterRefs", "value": ["prune-logs"]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
exutil.By("Create project for app logs and deploy the log generator")
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
oc.SetupProject()
ns := oc.Namespace()
err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", ns, "-p", "LABELS={\"test\": \"logging-71753-test\", \"test.logging.io/logging.qe-test-label\": \"logging-71753-test\"}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check logs in google cloud logging")
for _, logType := range []string{"application", "infrastructure", "audit"} {
err = gcl.waitForLogsAppearByType(logType)
exutil.AssertWaitPollNoErr(err, logType+" logs are not collected")
}
exutil.By("Fields kubernetes.namespace_name, kubernetes.labels.\"test.logging.io/logging.qe-test-label\", kubernetes.annotations and file should be pruned")
// sleep 30 seconds for collector pods to send new data to google cloud logging
time.Sleep(30 * time.Second)
logs, err := gcl.getLogByType("application")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(logs) > 0).Should(o.BeTrue())
extractedLogs, err := extractGoogleCloudLoggingLogs(logs)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(extractedLogs) > 0).Should(o.BeTrue())
o.Expect(extractedLogs[0].File == "").Should(o.BeTrue())
o.Expect(extractedLogs[0].Kubernetes.Annotations == nil).Should(o.BeTrue())
o.Expect(extractedLogs[0].Kubernetes.NamespaceName == "").Should(o.BeTrue())
o.Expect(extractedLogs[0].Kubernetes.Lables["test_logging_io_logging_qe-test-label"] == "").Should(o.BeTrue())
exutil.By("Prune .hostname, the CLF should be rejected")
patch = `[{"op": "replace", "path": "/spec/filters/0/prune/in", "value": [".hostname",".kubernetes.namespace_name",".kubernetes.labels.\"test.logging.io/logging.qe-test-label\"",".file",".kubernetes.annotations"]}]`
clf.update(oc, "", patch, "--type=json")
checkResource(oc, true, false, "\"prune-logs\" prunes the `.hostname` field which is required for output: \"gcp-logging\" of type \"googleCloudLogging\"", []string{"clusterlogforwarders.observability.openshift.io", clf.name, "-n", clf.namespace, "-ojsonpath={.status.pipelineConditions[0].message}"})
exutil.By("Update CLF to only reserve several fields")
patch = `[{"op": "replace", "path": "/spec/filters/0/prune", "value": {"notIn": [".log_type",".log_source",".message",".kubernetes",".\"@timestamp\"",".openshift",".hostname"]}}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 60 seconds for collector pods to send new data to google cloud logging
time.Sleep(60 * time.Second)
exutil.By("Check logs in google cloud logging")
err = gcl.waitForLogsAppearByNamespace(ns)
exutil.AssertWaitPollNoErr(err, "logs from project/"+ns+" are not collected")
logs, err = gcl.getLogByNamespace(ns)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(logs) > 0).Should(o.BeTrue())
extractedLogs, err = extractGoogleCloudLoggingLogs(logs)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(extractedLogs) > 0).Should(o.BeTrue())
o.Expect(extractedLogs[0].File == "").Should(o.BeTrue())
o.Expect(extractedLogs[0].Kubernetes.Annotations != nil).Should(o.BeTrue())
o.Expect(extractedLogs[0].Kubernetes.Lables["test_logging_io_logging_qe-test-label"] == "logging-71753-test").Should(o.BeTrue())
exutil.By("Prune .hostname, the CLF should be rejected")
patch = `[{"op": "replace", "path": "/spec/filters/0/prune/notIn", "value": [".log_type",".log_source",".message",".kubernetes",".\"@timestamp\"",".openshift"]}]`
clf.update(oc, "", patch, "--type=json")
checkResource(oc, true, false, "\"prune-logs\" prunes the `.hostname` field which is required for output: \"gcp-logging\" of type \"googleCloudLogging\"", []string{"clusterlogforwarders.observability.openshift.io", clf.name, "-n", clf.namespace, "-ojsonpath={.status.pipelineConditions[0].message}"})
exutil.By("Combine in and notIn")
patch = `[{"op": "replace", "path": "/spec/filters/0/prune", "value": {"notIn": [".log_type",".log_source",".message",".kubernetes",".\"@timestamp\"",".hostname"],
"in": [".kubernetes.namespace_name",".kubernetes.labels.\"test.logging.io/logging.qe-test-label\"",".file",".kubernetes.annotations"]}}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 30 seconds for collector pods to send new data to google cloud logging
time.Sleep(30 * time.Second)
logs, err = gcl.getLogByType("application")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(logs) > 0).Should(o.BeTrue())
extractedLogs, err = extractGoogleCloudLoggingLogs(logs)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(extractedLogs) > 0).Should(o.BeTrue())
o.Expect(extractedLogs[0].File == "").Should(o.BeTrue())
o.Expect(extractedLogs[0].OpenShift.ClusterID == "").Should(o.BeTrue())
o.Expect(extractedLogs[0].Kubernetes.Annotations == nil).Should(o.BeTrue())
o.Expect(extractedLogs[0].Kubernetes.NamespaceName == "").Should(o.BeTrue())
o.Expect(extractedLogs[0].Kubernetes.Lables["test_logging_io_logging_qe-test-label"] == "").Should(o.BeTrue())
})
})
|
package logging
| ||||
test case
|
openshift/openshift-tests-private
|
f4b84296-002a-4365-b616-597489bfbeab
|
Author:qitang-CPaasrunOnly-High-53731-Forward logs to GCL using different logName for each log type and using Service Account authentication.
|
['"context"', '"fmt"', '"path/filepath"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_google_cloud_logging.go
|
g.It("Author:qitang-CPaasrunOnly-High-53731-Forward logs to GCL using different logName for each log type and using Service Account authentication.", func() {
g.By("Create log producer")
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
projectID, err := exutil.GetGcpProjectID(oc)
o.Expect(err).NotTo(o.HaveOccurred())
logName := getInfrastructureName(oc) + "-53731"
logTypes := []string{"infrastructure", "audit", "application"}
for _, logType := range logTypes {
defer googleCloudLogging{projectID: projectID, logName: logName + "-" + logType}.removeLogs()
}
oc.SetupProject()
clfNS := oc.Namespace()
gcpSecret := resource{"secret", "gcp-secret-53731", clfNS}
defer gcpSecret.clear(oc)
err = createSecretForGCL(oc, gcpSecret.name, gcpSecret.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
clf := clusterlogforwarder{
name: "clf-53731",
namespace: clfNS,
secretName: gcpSecret.name,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "google-cloud-logging-multi-logids.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "ID_TYPE=project", "ID_VALUE="+projectID, "LOG_ID="+logName)
for _, logType := range logTypes {
gcl := googleCloudLogging{
projectID: projectID,
logName: logName + "-" + logType,
}
err = wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
logs, err := gcl.getLogByType(logType)
if err != nil {
return false, err
}
return len(logs) > 0, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("%s logs are not found", logType))
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
d29077af-fd73-45a2-92ec-6fa47ec05605
|
CPaasrunOnly-Author:qitang-High-71003-Collect or exclude logs by matching pod expressions[Slow]
|
['"path/filepath"', '"strconv"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_google_cloud_logging.go
|
g.It("CPaasrunOnly-Author:qitang-High-71003-Collect or exclude logs by matching pod expressions[Slow]", func() {
clfNS := oc.Namespace()
projectID, err := exutil.GetGcpProjectID(oc)
o.Expect(err).NotTo(o.HaveOccurred())
gcl := googleCloudLogging{
projectID: projectID,
logName: getInfrastructureName(oc) + "-71003",
}
defer gcl.removeLogs()
gcpSecret := resource{"secret", "gcp-secret-71003", clfNS}
defer gcpSecret.clear(oc)
err = createSecretForGCL(oc, gcpSecret.name, gcpSecret.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
clf := clusterlogforwarder{
name: "clf-71003",
namespace: clfNS,
secretName: gcpSecret.name,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "googleCloudLogging.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "ID_TYPE=project", "ID_VALUE="+gcl.projectID, "LOG_ID="+gcl.logName, "INPUT_REFS=[\"application\"]")
patch := `[{"op": "add", "path": "/spec/inputs", "value": [{"name": "new-app","type": "application","application": {"selector": {"matchExpressions": [{"key": "test.logging.io/logging.qe-test-label", "operator": "In", "values": ["logging-71003-test-0", "logging-71003-test-1", "logging-71003-test-2"]},{"key":"test", "operator":"Exists"}]}}}]}, {"op": "replace", "path": "/spec/pipelines/0/inputRefs", "value": ["new-app"]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
exutil.By("Create project for app logs and deploy the log generator")
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
var namespaces []string
for i := 0; i < 4; i++ {
ns := "logging-project-71003-" + strconv.Itoa(i)
defer oc.DeleteSpecifiedNamespaceAsAdmin(ns)
oc.CreateSpecifiedNamespaceAsAdmin(ns)
namespaces = append(namespaces, ns)
}
err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", namespaces[0], "-p", "LABELS={\"test\": \"logging-71003-0\", \"test.logging.io/logging.qe-test-label\": \"logging-71003-test-0\"}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", namespaces[1], "-p", "LABELS={\"test.logging-71003\": \"logging-71003-1\", \"test.logging.io/logging.qe-test-label\": \"logging-71003-test-1\"}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", namespaces[2], "-p", "LABELS={\"test.logging.io/logging.qe-test-label\": \"logging-71003-test-2\"}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", namespaces[3], "-p", "LABELS={\"test\": \"logging-71003-3\"}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check data in google cloud logging, only logs from project namespaces[0] should be collected")
err = gcl.waitForLogsAppearByType("application")
exutil.AssertWaitPollNoErr(err, "application logs are not collected")
appLogs1, err := gcl.getLogByNamespace(namespaces[0])
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(appLogs1) > 0).Should(o.BeTrue())
for i := 1; i < 4; i++ {
appLogs, err := gcl.getLogByNamespace(namespaces[i])
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(appLogs) == 0).Should(o.BeTrue())
}
exutil.By("Update CLF, change the matchExpressions")
patch = `[{"op": "replace", "path": "/spec/inputs/0/application/selector/matchExpressions", "value": [{"key": "test.logging.io/logging.qe-test-label", "operator": "In", "values": ["logging-71003-test-0", "logging-71003-test-1", "logging-71003-test-2"]},{"key":"test", "operator":"DoesNotExist"}]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
gcl.removeLogs()
exutil.By("Check data in google cloud logging, logs from project namespaces[1] and namespaces[2] should be collected")
err = gcl.waitForLogsAppearByNamespace(namespaces[1])
exutil.AssertWaitPollNoErr(err, "can't find logs from project "+namespaces[1])
err = gcl.waitForLogsAppearByNamespace(namespaces[2])
exutil.AssertWaitPollNoErr(err, "can't find logs from project "+namespaces[2])
for _, ns := range []string{namespaces[0], namespaces[3]} {
appLogs, err := gcl.getLogByNamespace(ns)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(appLogs) == 0).Should(o.BeTrue(), "find logs from project "+ns+", this is not expected")
}
exutil.By("Update CLF, change the matchExpressions")
patch = `[{"op": "replace", "path": "/spec/inputs/0/application/selector/matchExpressions", "value": [{"key": "test.logging.io/logging.qe-test-label", "operator": "NotIn", "values": ["logging-71003-test-0", "logging-71003-test-1", "logging-71003-test-2"]},{"key":"test", "operator":"Exists"}]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
gcl.removeLogs()
exutil.By("Check data in google cloud logging, logs from project namespaces[3] should be collected")
err = gcl.waitForLogsAppearByNamespace(namespaces[3])
exutil.AssertWaitPollNoErr(err, "can't find logs from project "+namespaces[3])
for i := 0; i < 3; i++ {
appLogs, err := gcl.getLogByNamespace(namespaces[i])
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(appLogs) == 0).Should(o.BeTrue(), "find logs from project "+namespaces[i]+", this is not expected")
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
35e74bbe-3ade-4903-a88a-ddf1992090ab
|
CPaasrunOnly-Author:ikanse-High-61602-Collector external Google Cloud logging complies with the tlsSecurityProfile configuration. [Slow][Disruptive]
|
['"context"', '"fmt"', '"path/filepath"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_google_cloud_logging.go
|
g.It("CPaasrunOnly-Author:ikanse-High-61602-Collector external Google Cloud logging complies with the tlsSecurityProfile configuration. [Slow][Disruptive]", func() {
g.By("Configure the global tlsSecurityProfile to use Intermediate profile")
ogTLS, er := oc.AsAdmin().WithoutNamespace().Run("get").Args("apiserver/cluster", "-o", "jsonpath={.spec.tlsSecurityProfile}").Output()
o.Expect(er).NotTo(o.HaveOccurred())
if ogTLS == "" {
ogTLS = "null"
}
ogPatch := fmt.Sprintf(`[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": %s}]`, ogTLS)
defer func() {
oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", ogPatch).Execute()
waitForOperatorsRunning(oc)
}()
patch := `[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": {"intermediate":{},"type":"Intermediate"}}]`
er = oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", patch).Execute()
o.Expect(er).NotTo(o.HaveOccurred())
g.By("Make sure that all the Cluster Operators are in healthy state before progressing.")
waitForOperatorsRunning(oc)
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
g.By("Create log producer")
appProj1 := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj1, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
clfNS := oc.Namespace()
projectID, err := exutil.GetGcpProjectID(oc)
o.Expect(err).NotTo(o.HaveOccurred())
gcl := googleCloudLogging{
projectID: projectID,
logName: getInfrastructureName(oc) + "-61602",
}
defer gcl.removeLogs()
gcpSecret := resource{"secret", "gcp-secret-61602", clfNS}
defer gcpSecret.clear(oc)
err = createSecretForGCL(oc, gcpSecret.name, gcpSecret.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
clf := clusterlogforwarder{
name: "clf-61602",
namespace: clfNS,
secretName: gcpSecret.name,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "googleCloudLogging.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "ID_TYPE=project", "ID_VALUE="+gcl.projectID, "LOG_ID="+gcl.logName, "INPUT_REFS=[\"application\"]")
g.By("The Google Cloud sink in Vector config must use the intermediate tlsSecurityProfile")
searchString := `[sinks.output_gcp_logging.tls]
min_tls_version = "VersionTLS12"
ciphersuites = "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,ECDHE-ECDSA-AES128-GCM-SHA256,ECDHE-RSA-AES128-GCM-SHA256,ECDHE-ECDSA-AES256-GCM-SHA384,ECDHE-RSA-AES256-GCM-SHA384,ECDHE-ECDSA-CHACHA20-POLY1305,ECDHE-RSA-CHACHA20-POLY1305,DHE-RSA-AES128-GCM-SHA256,DHE-RSA-AES256-GCM-SHA384"`
result, err := checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", searchString)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue(), "the configuration %s is not in vector.toml", searchString)
err = wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
logs, err := gcl.getLogByType("application")
if err != nil {
return false, err
}
return len(logs) > 0, nil
})
exutil.AssertWaitPollNoErr(err, "application logs are not found")
appLogs1, err := gcl.getLogByNamespace(appProj1)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(appLogs1) > 0).Should(o.BeTrue())
g.By("Set Modern tlsSecurityProfile for the External Google Cloud logging output.")
patch = `[{"op": "add", "path": "/spec/outputs/0/tls", "value": {"securityProfile": {"type": "Modern"}}}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
g.By("Delete logs from Google Cloud Logging")
err = gcl.removeLogs()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("The Google Cloud sink in Vector config must use the Modern tlsSecurityProfile")
searchString = `[sinks.output_gcp_logging.tls]
min_tls_version = "VersionTLS13"
ciphersuites = "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256"`
result, err = checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", searchString)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue(), "the configuration %s is not in vector.toml", searchString)
err = wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
logs, err := gcl.getLogByType("application")
if err != nil {
return false, err
}
return len(logs) > 0, nil
})
exutil.AssertWaitPollNoErr(err, "application logs are not found")
appLogs1, err = gcl.getLogByNamespace(appProj1)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(appLogs1) > 0).Should(o.BeTrue())
})
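The two searchString blocks above encode the Vector TLS settings expected for the Intermediate and Modern profiles. As a hedged, standalone sketch (not part of the suite or the operator code), the helper below shows one way such an expectation string could be assembled from a profile name; the cipher lists are copied verbatim from the test expectations, while the mapping function itself is only illustrative.

// Minimal sketch, assuming only the two profiles exercised by the test above.
package main

import "fmt"

func expectedTLSBlock(sink, profile string) string {
	// Defaults correspond to the Intermediate profile expectation in the test.
	minVersion := "VersionTLS12"
	ciphers := "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,ECDHE-ECDSA-AES128-GCM-SHA256,ECDHE-RSA-AES128-GCM-SHA256,ECDHE-ECDSA-AES256-GCM-SHA384,ECDHE-RSA-AES256-GCM-SHA384,ECDHE-ECDSA-CHACHA20-POLY1305,ECDHE-RSA-CHACHA20-POLY1305,DHE-RSA-AES128-GCM-SHA256,DHE-RSA-AES256-GCM-SHA384"
	if profile == "Modern" {
		minVersion = "VersionTLS13"
		ciphers = "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256"
	}
	return fmt.Sprintf("[sinks.%s.tls]\nmin_tls_version = %q\nciphersuites = %q", sink, minVersion, ciphers)
}

func main() {
	// Prints the block the test would grep for after switching the output to Modern.
	fmt.Println(expectedTLSBlock("output_gcp_logging", "Modern"))
}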
| |||||
test case
|
openshift/openshift-tests-private
|
e2cf3abb-ef6c-4eec-8cd4-c1128f7edce2
|
CPaasrunOnly-Author:qitang-Medium-71777-Include or exclude logs by combining namespace and container selectors.[Slow]
|
['"fmt"', '"path/filepath"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_google_cloud_logging.go
|
g.It("CPaasrunOnly-Author:qitang-Medium-71777-Include or exclude logs by combining namespace and container selectors.[Slow]", func() {
clfNS := oc.Namespace()
projectID, err := exutil.GetGcpProjectID(oc)
o.Expect(err).NotTo(o.HaveOccurred())
gcl := googleCloudLogging{
projectID: projectID,
logName: getInfrastructureName(oc) + "-71777",
}
defer gcl.removeLogs()
gcpSecret := resource{"secret", "gcp-secret-71777", clfNS}
defer gcpSecret.clear(oc)
err = createSecretForGCL(oc, gcpSecret.name, gcpSecret.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
clf := clusterlogforwarder{
name: "clf-71777",
namespace: clfNS,
secretName: gcpSecret.name,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "googleCloudLogging.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "ID_TYPE=project", "ID_VALUE="+gcl.projectID, "LOG_ID="+gcl.logName, "INPUT_REFS=[\"application\"]")
exutil.By("exclude logs from specific container in specific namespace")
patch := `[{"op": "add", "path": "/spec/inputs", "value": [{"name": "new-app","type": "application" ,"application": {"excludes": [{"namespace": "logging-log-71777", "container": "exclude-log-71777"}]}}]}, {"op": "replace", "path": "/spec/pipelines/0/inputRefs", "value": ["new-app"]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
exutil.By("Create project for app logs and deploy the log generator")
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
namespaces := []string{"logging-log-71777", "logging-data-71777", "e2e-test-log-71777"}
containerNames := []string{"log-71777-include", "exclude-log-71777"}
for _, ns := range namespaces {
defer oc.DeleteSpecifiedNamespaceAsAdmin(ns)
oc.CreateSpecifiedNamespaceAsAdmin(ns)
for _, container := range containerNames {
err := oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", ns, "-p", "CONTAINER="+container, "-p", "CONFIGMAP="+container, "-p", "REPLICATIONCONTROLLER="+container).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
}
generateQuery := func(ns, container string) string {
return fmt.Sprintf(` AND jsonPayload.kubernetes.namespace_name="%s" AND jsonPayload.kubernetes.container_name="%s"`, ns, container)
}
exutil.By("Check data in google cloud logging, logs from container/exclude-log-71777 in project/logging-log-71777 shouldn't be collected")
err = gcl.waitForLogsAppearByType("application")
exutil.AssertWaitPollNoErr(err, "application logs are not collected")
for _, ns := range []string{"logging-data-71777", "e2e-test-log-71777"} {
for _, container := range containerNames {
appLogs, err := gcl.listLogEntries(generateQuery(ns, container))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(appLogs) > 0).Should(o.BeTrue(), "can't find logs from container "+container+" in project "+ns+", this is not expected")
}
}
appLogs, err := gcl.listLogEntries(generateQuery("logging-log-71777", "log-71777-include"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(appLogs) > 0).Should(o.BeTrue(), "can't find logs from container/log-71777-include in project/logging-log-71777, this is not expected")
appLogs, err = gcl.listLogEntries(generateQuery("logging-log-71777", "exclude-log-71777"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(appLogs) == 0).Should(o.BeTrue(), "find logs from container/exclude-log-71777 in project/logging-log-71777, this is not expected")
exutil.By("exclude logs from specific containers in all namespaces")
patch = `[{"op": "replace", "path": "/spec/inputs/0/application/excludes", "value": [{"namespace": "*", "container": "exclude-log-71777"}]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
gcl.removeLogs()
exutil.By("Check data in google cloud logging, no logs from container/exclude-log-71777")
err = gcl.waitForLogsAppearByType("application")
exutil.AssertWaitPollNoErr(err, "application logs are not collected")
for _, ns := range namespaces {
err = gcl.waitForLogsAppearByNamespace(ns)
exutil.AssertWaitPollNoErr(err, "can't find logs from project "+ns)
}
logs, err := gcl.listLogEntries(` AND jsonPayload.kubernetes.container_name="exclude-log-71777"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(logs) == 0).Should(o.BeTrue(), "find logs from container exclude-log-71777, this is not expected")
exutil.By("exclude logs from all containers in specific namespaces")
patch = `[{"op": "replace", "path": "/spec/inputs/0/application/excludes", "value": [{"namespace": "e2e-test-log-71777", "container": "*"}]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
gcl.removeLogs()
exutil.By("Check data in google cloud logging, no logs from project/e2e-test-log-71777")
err = gcl.waitForLogsAppearByType("application")
exutil.AssertWaitPollNoErr(err, "application logs are not collected")
for _, ns := range []string{"logging-log-71777", "logging-data-71777"} {
for _, container := range containerNames {
appLogs, err := gcl.listLogEntries(generateQuery(ns, container))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(appLogs) > 0).Should(o.BeTrue(), "can't find logs from container "+container+" in project "+ns+", this is not expected")
}
}
logs, err = gcl.getLogByNamespace("e2e-test-log-71777")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(logs) == 0).Should(o.BeTrue(), "find logs from project e2e-test-log-71777, this is not expected")
exutil.By("Update CLF to collect logs from specific containers in specific namespaces")
patch = `[{"op": "remove", "path": "/spec/inputs/0/application/excludes"}, {"op": "add", "path": "/spec/inputs/0/application", "value": {"includes": [{"namespace": "logging-log-71777", "container": "log-71777-include"}]}}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
gcl.removeLogs()
exutil.By("Check data in google cloud logging, only logs from container log-71777-include in project logging-log-71777 should be collected")
err = gcl.waitForLogsAppearByNamespace("logging-log-71777")
exutil.AssertWaitPollNoErr(err, "logs from project logging-log-71777 are not collected")
includeLogs, err := gcl.listLogEntries(generateQuery("logging-log-71777", "log-71777-include"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(includeLogs) > 0).Should(o.BeTrue(), "can't find logs from container log-71777-include in project logging-log-71777, this is not expected")
excludeLogs, err := gcl.listLogEntries(` AND jsonPayload.kubernetes.namespace_name!="logging-log-71777" OR jsonPayload.kubernetes.container_name!="log-71777-include"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(excludeLogs) == 0).Should(o.BeTrue(), "find logs from other containers, this is not expected")
exutil.By("collect logs from specific containers in all namespaces")
patch = `[{"op": "replace", "path": "/spec/inputs/0/application/includes", "value": [{"namespace": "*", "container": "log-71777-include"}]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
gcl.removeLogs()
exutil.By("Check data in google cloud logging, only logs from container/log-71777-include should be collected")
err = gcl.waitForLogsAppearByType("application")
exutil.AssertWaitPollNoErr(err, "application logs are not collected")
for _, ns := range namespaces {
err = gcl.waitForLogsAppearByNamespace(ns)
exutil.AssertWaitPollNoErr(err, "can't find logs from project "+ns)
}
logs, err = gcl.listLogEntries(` AND jsonPayload.kubernetes.container_name != "log-71777-include"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(logs) == 0).Should(o.BeTrue(), "find logs from other containers, this is not expected")
// no logs from openshift* projects
exutil.By("collect logs from all containers in specific namespaces")
patch = `[{"op": "replace", "path": "/spec/inputs/0/application/includes", "value": [{"namespace": "logging-data-71777", "container": "*"}]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
gcl.removeLogs()
exutil.By("Check data in google cloud logging, only logs from project/logging-data-71777 should be collected")
err = gcl.waitForLogsAppearByType("application")
exutil.AssertWaitPollNoErr(err, "application logs are not collected")
logs, err = gcl.listLogEntries(` AND jsonPayload.kubernetes.namespace_name != "logging-data-71777"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(logs) == 0).Should(o.BeTrue(), "find logs from other projects, this is not expected")
logs, err = gcl.getLogByNamespace("logging-data-71777")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(logs) > 0).Should(o.BeTrue(), "can't find logs from project logging-data-71777, this is not expected")
exutil.By("combine includes and excludes")
patch = `[{"op": "replace", "path": "/spec/inputs/0/application", "value": {"includes": [{"namespace": "*log*", "container": "log-71777*"}], "excludes": [{"namespace": "logging*71777", "container": "log-71777-include"}]}}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
gcl.removeLogs()
exutil.By("Check data in google cloud logging, only logs from container/log-71777-include in project/e2e-test-log-71777 should be collected")
err = gcl.waitForLogsAppearByType("application")
exutil.AssertWaitPollNoErr(err, "application logs are not collected")
logs, err = gcl.listLogEntries(generateQuery("e2e-test-log-71777", "log-71777-include"))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(logs) > 0).Should(o.BeTrue(), "can't find logs from container log-71777-include in project e2e-test-log-71777, this is not expected")
logs, err = gcl.listLogEntries(` AND jsonPayload.kubernetes.namespace_name!="e2e-test-log-71777" OR jsonPayload.kubernetes.container_name!="log-71777-include"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(logs) == 0).Should(o.BeTrue(), "find logs from other containers, this is not expected")
})
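The include/exclude patches above combine namespace and container glob patterns. The snippet below is a rough sketch of that matching logic under stated assumptions (it is not the collector's implementation): a record is kept when it matches at least one include rule (or no includes are set) and no exclude rule, with "*" and other globs applied to both the namespace and the container name. The sample values mirror the last scenario in the test.

// Illustrative only; path.Match stands in for whatever glob matching the collector uses.
package main

import (
	"fmt"
	"path"
)

type rule struct{ namespace, container string }

func matches(r rule, ns, c string) bool {
	okNS, _ := path.Match(r.namespace, ns)
	okC, _ := path.Match(r.container, c)
	return okNS && okC
}

func keep(includes, excludes []rule, ns, c string) bool {
	included := len(includes) == 0
	for _, r := range includes {
		if matches(r, ns, c) {
			included = true
			break
		}
	}
	if !included {
		return false
	}
	for _, r := range excludes {
		if matches(r, ns, c) {
			return false
		}
	}
	return true
}

func main() {
	inc := []rule{{"*log*", "log-71777*"}}
	exc := []rule{{"logging*71777", "log-71777-include"}}
	fmt.Println(keep(inc, exc, "e2e-test-log-71777", "log-71777-include")) // true: included, not excluded
	fmt.Println(keep(inc, exc, "logging-log-71777", "log-71777-include"))  // false: matches the exclude rule
}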
| |||||
test case
|
openshift/openshift-tests-private
|
a7bcce9d-2a23-48bb-a875-a2d4c17d8ab6
|
CPaasrunOnly-ConnectedOnly-Author:qitang-Medium-71753-Prune fields from log messages
|
['"path/filepath"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_google_cloud_logging.go
|
g.It("CPaasrunOnly-ConnectedOnly-Author:qitang-Medium-71753-Prune fields from log messages", func() {
exutil.By("Create CLF")
clfNS := oc.Namespace()
projectID, err := exutil.GetGcpProjectID(oc)
o.Expect(err).NotTo(o.HaveOccurred())
gcl := googleCloudLogging{
projectID: projectID,
logName: getInfrastructureName(oc) + "-71753",
}
defer gcl.removeLogs()
gcpSecret := resource{"secret", "gcp-secret-71753", clfNS}
defer gcpSecret.clear(oc)
err = createSecretForGCL(oc, gcpSecret.name, gcpSecret.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
clf := clusterlogforwarder{
name: "clf-71753",
namespace: clfNS,
secretName: gcpSecret.name,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "googleCloudLogging.yaml"),
collectApplicationLogs: true,
collectInfrastructureLogs: true,
collectAuditLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "ID_TYPE=project", "ID_VALUE="+gcl.projectID, "LOG_ID="+gcl.logName)
exutil.By("Add prune filters to CLF")
patch := `[{"op": "add", "path": "/spec/filters", "value": [{"name": "prune-logs", "type": "prune", "prune": {"in": [".kubernetes.namespace_name",".kubernetes.labels.\"test.logging.io/logging.qe-test-label\"",".file",".kubernetes.annotations"]}}]},
{"op": "add", "path": "/spec/pipelines/0/filterRefs", "value": ["prune-logs"]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
exutil.By("Create project for app logs and deploy the log generator")
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
oc.SetupProject()
ns := oc.Namespace()
err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", ns, "-p", "LABELS={\"test\": \"logging-71753-test\", \"test.logging.io/logging.qe-test-label\": \"logging-71753-test\"}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check logs in google cloud logging")
for _, logType := range []string{"application", "infrastructure", "audit"} {
err = gcl.waitForLogsAppearByType(logType)
exutil.AssertWaitPollNoErr(err, logType+" logs are not collected")
}
exutil.By("Fields kubernetes.namespace_name, kubernetes.labels.\"test.logging.io/logging.qe-test-label\", kubernetes.annotations and file should be pruned")
// sleep 30 seconds for collector pods to send new data to google cloud logging
time.Sleep(30 * time.Second)
logs, err := gcl.getLogByType("application")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(logs) > 0).Should(o.BeTrue())
extractedLogs, err := extractGoogleCloudLoggingLogs(logs)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(extractedLogs) > 0).Should(o.BeTrue())
o.Expect(extractedLogs[0].File == "").Should(o.BeTrue())
o.Expect(extractedLogs[0].Kubernetes.Annotations == nil).Should(o.BeTrue())
o.Expect(extractedLogs[0].Kubernetes.NamespaceName == "").Should(o.BeTrue())
o.Expect(extractedLogs[0].Kubernetes.Lables["test_logging_io_logging_qe-test-label"] == "").Should(o.BeTrue())
exutil.By("Prune .hostname, the CLF should be rejected")
patch = `[{"op": "replace", "path": "/spec/filters/0/prune/in", "value": [".hostname",".kubernetes.namespace_name",".kubernetes.labels.\"test.logging.io/logging.qe-test-label\"",".file",".kubernetes.annotations"]}]`
clf.update(oc, "", patch, "--type=json")
checkResource(oc, true, false, "\"prune-logs\" prunes the `.hostname` field which is required for output: \"gcp-logging\" of type \"googleCloudLogging\"", []string{"clusterlogforwarders.observability.openshift.io", clf.name, "-n", clf.namespace, "-ojsonpath={.status.pipelineConditions[0].message}"})
exutil.By("Update CLF to only reserve several fields")
patch = `[{"op": "replace", "path": "/spec/filters/0/prune", "value": {"notIn": [".log_type",".log_source",".message",".kubernetes",".\"@timestamp\"",".openshift",".hostname"]}}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 60 seconds for collector pods to send new data to google cloud logging
time.Sleep(60 * time.Second)
exutil.By("Check logs in google cloud logging")
err = gcl.waitForLogsAppearByNamespace(ns)
exutil.AssertWaitPollNoErr(err, "logs from project/"+ns+" are not collected")
logs, err = gcl.getLogByNamespace(ns)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(logs) > 0).Should(o.BeTrue())
extractedLogs, err = extractGoogleCloudLoggingLogs(logs)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(extractedLogs) > 0).Should(o.BeTrue())
o.Expect(extractedLogs[0].File == "").Should(o.BeTrue())
o.Expect(extractedLogs[0].Kubernetes.Annotations != nil).Should(o.BeTrue())
o.Expect(extractedLogs[0].Kubernetes.Lables["test_logging_io_logging_qe-test-label"] == "logging-71753-test").Should(o.BeTrue())
exutil.By("Prune .hostname, the CLF should be rejected")
patch = `[{"op": "replace", "path": "/spec/filters/0/prune/notIn", "value": [".log_type",".log_source",".message",".kubernetes",".\"@timestamp\"",".openshift"]}]`
clf.update(oc, "", patch, "--type=json")
checkResource(oc, true, false, "\"prune-logs\" prunes the `.hostname` field which is required for output: \"gcp-logging\" of type \"googleCloudLogging\"", []string{"clusterlogforwarders.observability.openshift.io", clf.name, "-n", clf.namespace, "-ojsonpath={.status.pipelineConditions[0].message}"})
exutil.By("Combine in and notIn")
patch = `[{"op": "replace", "path": "/spec/filters/0/prune", "value": {"notIn": [".log_type",".log_source",".message",".kubernetes",".\"@timestamp\"",".hostname"],
"in": [".kubernetes.namespace_name",".kubernetes.labels.\"test.logging.io/logging.qe-test-label\"",".file",".kubernetes.annotations"]}}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 30 seconds for collector pods to send new data to google cloud logging
time.Sleep(30 * time.Second)
logs, err = gcl.getLogByType("application")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(logs) > 0).Should(o.BeTrue())
extractedLogs, err = extractGoogleCloudLoggingLogs(logs)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(extractedLogs) > 0).Should(o.BeTrue())
o.Expect(extractedLogs[0].File == "").Should(o.BeTrue())
o.Expect(extractedLogs[0].OpenShift.ClusterID == "").Should(o.BeTrue())
o.Expect(extractedLogs[0].Kubernetes.Annotations == nil).Should(o.BeTrue())
o.Expect(extractedLogs[0].Kubernetes.NamespaceName == "").Should(o.BeTrue())
o.Expect(extractedLogs[0].Kubernetes.Lables["test_logging_io_logging_qe-test-label"] == "").Should(o.BeTrue())
})
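The prune filter above is driven by two lists: "in" drops the named field paths and "notIn" drops everything except the named paths, and the combined scenario applies both. The following is a hedged sketch of that behaviour for top-level keys only (an assumption made to keep the example short; the real filter works on dotted paths inside nested records).

// Sketch of prune semantics; not the collector's code.
package main

import "fmt"

func prune(record map[string]any, in, notIn []string) map[string]any {
	keepAll := len(notIn) == 0
	keepSet := map[string]bool{}
	for _, k := range notIn {
		keepSet[k] = true
	}
	dropSet := map[string]bool{}
	for _, k := range in {
		dropSet[k] = true
	}
	out := map[string]any{}
	for k, v := range record {
		// Keep a field only if notIn allows it and in does not drop it.
		if (keepAll || keepSet[k]) && !dropSet[k] {
			out[k] = v
		}
	}
	return out
}

func main() {
	rec := map[string]any{"hostname": "node-1", "file": "/var/log/x.log", "message": "hi"}
	// Keeps hostname and message, drops file.
	fmt.Println(prune(rec, []string{"file"}, []string{"hostname", "message", "file"}))
}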
| |||||
test
|
openshift/openshift-tests-private
|
ddbf7271-1c3c-4d60-a75c-25bb9b0d1d19
|
vector_kafka
|
import (
"context"
"encoding/json"
"fmt"
"path/filepath"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_kafka.go
|
package logging
import (
"context"
"encoding/json"
"fmt"
"path/filepath"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-openshift-logging] Logging NonPreRelease", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("vector-kafka", exutil.KubeConfigPath())
loggingBaseDir string
)
g.Context("Log Forward to Kafka via Vector as Collector", func() {
g.BeforeEach(func() {
loggingBaseDir = exutil.FixturePath("testdata", "logging")
CLO := SubscriptionObjects{
OperatorName: "cluster-logging-operator",
Namespace: cloNS,
PackageName: "cluster-logging",
Subscription: filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml"),
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
g.By("deploy CLO")
CLO.SubscribeOperator(oc)
oc.SetupProject()
})
g.It("Author:ikanse-CPaasrunOnly-Medium-49369-Vector Forward logs to kafka topic via Mutual Chained certificates", func() {
g.By("Create log producer")
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
kafkaNS := "openshift-kafka-" + getRandomString()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("project", kafkaNS, "--wait=false").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", kafkaNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
kafka := kafka{
namespace: kafkaNS,
kafkasvcName: "kafka",
zoosvcName: "zookeeper",
authtype: "plaintext-ssl",
pipelineSecret: "kafka-vector",
collectorType: "vector",
loggingNS: appProj,
}
g.By("Deploy zookeeper")
defer kafka.removeZookeeper(oc)
kafka.deployZookeeper(oc)
g.By("Deploy kafka")
defer kafka.removeKafka(oc)
kafka.deployKafka(oc)
kafkaEndpoint := "tls://" + kafka.kafkasvcName + "." + kafka.namespace + ".svc.cluster.local:9093/clo-topic"
g.By("Create clusterlogforwarder")
clf := clusterlogforwarder{
name: "clf-49369",
namespace: appProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "clf-kafka-no-auth.yaml"),
secretName: kafka.pipelineSecret,
waitForPodReady: true,
collectApplicationLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "URL="+kafkaEndpoint)
g.By("Check app logs in kafka consumer pod")
consumerPodPodName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", kafka.namespace, "-l", "component=kafka-consumer", "-o", "name").Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
appLogs, err := getDataFromKafkaByNamespace(oc, kafka.namespace, consumerPodPodName, appProj)
if err != nil {
return false, err
}
return len(appLogs) > 0, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("App logs are not found in %s/%s", kafka.namespace, consumerPodPodName))
})
g.It("Author:ikanse-CPaasrunOnly-Medium-52420-Vector Forward logs to kafka using SASL plaintext", func() {
g.By("Create log producer")
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
kafkaNS := "openshift-kafka-" + getRandomString()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("project", kafkaNS, "--wait=false").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", kafkaNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploy zookeeper")
kafka := kafka{
namespace: kafkaNS,
kafkasvcName: "kafka",
zoosvcName: "zookeeper",
authtype: "sasl-plaintext",
pipelineSecret: "vector-kafka",
collectorType: "vector",
loggingNS: appProj,
}
defer kafka.removeZookeeper(oc)
kafka.deployZookeeper(oc)
g.By("Deploy kafka")
defer kafka.removeKafka(oc)
kafka.deployKafka(oc)
kafkaEndpoint := "http://" + kafka.kafkasvcName + "." + kafka.namespace + ".svc.cluster.local:9092/clo-topic"
g.By("Create clusterlogforwarder")
clf := clusterlogforwarder{
name: "clf-52420",
namespace: appProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "clf-kafka-with-auth.yaml"),
secretName: kafka.pipelineSecret,
waitForPodReady: true,
collectApplicationLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "URL="+kafkaEndpoint)
// Remove tls configuration from CLF as it is not required for this case
patch := `[{"op": "remove", "path": "/spec/outputs/0/tls"}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
g.By("Check app logs in kafka consumer pod")
consumerPodPodName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", kafka.namespace, "-l", "component=kafka-consumer", "-o", "name").Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
appLogs, err := getDataFromKafkaByNamespace(oc, kafka.namespace, consumerPodPodName, appProj)
if err != nil {
return false, err
}
return len(appLogs) > 0, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("App logs are not found in %s/%s", kafka.namespace, consumerPodPodName))
})
g.It("Author:anli-CPaasrunOnly-WRS-Critical-68312-VA-IAC.03-Forward to Kafka using SSL-SASL_SCRAM auth", func() {
amqNS := oc.Namespace()
g.By("crete kafka instance")
amqi := amqInstance{
name: "my-cluster",
namespace: amqNS,
topicPrefix: "logging-topic",
instanceType: "kafka-sasl-cluster",
}
defer amqi.destroy(oc)
amqi.deploy(oc)
topicName := "logging-topic-52496"
consumerPodName := amqi.createTopicAndConsumber(oc, topicName)
g.By("create log producer")
oc.SetupProject()
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("forward logs to Kafkas using ssl sasl scram-sha-512")
clf := clusterlogforwarder{
name: "clf-52496",
namespace: amqNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "clf-kafka-sasl-ssl.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: false,
collectInfrastructureLogs: false,
serviceAccountName: "clf-" + getRandomString(),
}
defer clf.delete(oc)
secretName := "secret-for-kafka-52420"
oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", secretName, "-n", clf.namespace, "--from-literal=username="+amqi.user, "--from-literal=password="+amqi.password, "--from-literal=ca-bundle.crt="+amqi.routeCA).Execute()
//To reduce the logs collected, we only collect app logs from appProj
//Note: all sasl and tls data come from the secret clf-to-amq with fixed key names -- user, password, ca
clf.create(oc, "URL=tls://"+amqi.route+"/"+topicName, "SECRET_NAME="+secretName, "NAMESPACE_PATTERN="+appProj)
g.By("verifiy the data are sent to kafka")
//application logs
logs, err := getDataFromKafkaConsumerPod(oc, amqi.namespace, consumerPodName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(logs) > 0).Should(o.BeTrue(), "Can not find any logs from kafka consumer pods")
})
// author [email protected]
g.It("Author:qitang-CPaasrunOnly-Medium-47036-Vector Forward logs to different AMQ Kafka topics[Slow]", func() {
nodes, err := oc.AdminKubeClient().CoreV1().Nodes().List(context.Background(), metav1.ListOptions{LabelSelector: "kubernetes.io/os=linux,kubernetes.io/arch=amd64"})
if err != nil || len(nodes.Items) == 0 {
g.Skip("Skip for the cluster doesn't have amd64 node")
}
amqNS := oc.Namespace()
g.By("crete kafka instance")
amqi := amqInstance{
name: "my-cluster",
namespace: amqNS,
topicPrefix: "topic-logging",
instanceType: "kafka-sasl-cluster",
}
defer amqi.destroy(oc)
amqi.deploy(oc)
//topic names are fixed values in clf_kafka_multi_topics.yaml
consumerAppPodName := amqi.createTopicAndConsumber(oc, amqi.topicPrefix+"-app")
consumerInfraPodName := amqi.createTopicAndConsumber(oc, amqi.topicPrefix+"-infra")
consumerAuditPodName := amqi.createTopicAndConsumber(oc, amqi.topicPrefix+"-audit")
g.By("create log producer")
oc.SetupProject()
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("forward logs to Kafkas")
clf := clusterlogforwarder{
name: "clf-47036",
namespace: amqNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "clf-kafka-multi-topics.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
secretName := "secret-for-kafka-47036"
oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", secretName, "-n", clf.namespace, "--from-literal=username="+amqi.user, "--from-literal=password="+amqi.password, "--from-literal=ca-bundle.crt="+amqi.routeCA).Execute()
defer clf.delete(oc)
clf.create(oc, "BOOTSTRAP_SVC="+amqi.service, "NAMESPACE_PATTERN="+appProj, "APP_TOPIC="+amqi.topicPrefix+"-app", "INFRA_TOPIC="+amqi.topicPrefix+"-infra", "AUDIT_TOPIC="+amqi.topicPrefix+"-audit", "SECRET_NAME="+secretName)
g.By("check data in kafka")
//app logs
appLogs, err := getDataFromKafkaConsumerPod(oc, amqNS, consumerAppPodName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(appLogs) > 0).Should(o.BeTrue(), "Can not find any logs from topic-logging-app-consumer")
e2e.Logf("found app logs \n")
//infrastructure logs
infraLogs, err := getDataFromKafkaConsumerPod(oc, amqNS, consumerInfraPodName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(infraLogs) > 0).Should(o.BeTrue(), "Can not find any logs from topic-logging-infra-consumer")
o.Expect(infraLogs[0].LogType == "infrastructure").Should(o.BeTrue(), "Can not find infra logs in consumer pod")
e2e.Logf("found infra logs \n")
//audit logs
auditLogs, err := getDataFromKafkaConsumerPod(oc, amqNS, consumerAuditPodName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(auditLogs) > 0).Should(o.BeTrue(), "Can not find any logs from topic-logging-audit-consumer")
o.Expect(auditLogs[0].LogType == "audit").Should(o.BeTrue(), "Can not find audit logs in consumer pod")
e2e.Logf("found audit logs \n")
})
// author [email protected]
g.It("Author:qitang-CPaasrunOnly-Medium-48141-Vector Forward logs to different Kafka brokers.[Slow]", func() {
nodes, err := oc.AdminKubeClient().CoreV1().Nodes().List(context.Background(), metav1.ListOptions{LabelSelector: "kubernetes.io/os=linux,kubernetes.io/arch=amd64"})
if err != nil || len(nodes.Items) == 0 {
g.Skip("Skip for the cluster doesn't have amd64 node")
}
//create the projects first, so OLM has more time to prepare the CSV in these namespaces
amqi1NS := oc.Namespace()
oc.SetupProject()
amqi2NS := oc.Namespace()
g.By("deploy AMQ kafka instance in two different namespaces")
// to avoid collecting kafka logs, deploy kafka in project openshift-*
// In general, we send data to brokers within one kafka cluster; for historical reasons, we use two clusters here. TODO: launch one cluster with more than one broker
topicName := "topic-logging"
amqi1 := amqInstance{
name: "my-cluster",
namespace: amqi1NS,
topicPrefix: topicName,
instanceType: "kafka-no-auth-cluster",
}
amqi2 := amqInstance{
name: "my-cluster",
namespace: amqi2NS,
topicPrefix: topicName,
instanceType: "kafka-no-auth-cluster",
}
defer amqi1.destroy(oc)
amqi1.deploy(oc)
amqi1ConsumerPodName := amqi1.createTopicAndConsumber(oc, topicName)
defer amqi2.destroy(oc)
amqi2.deploy(oc)
amqi2ConsumerPodName := amqi2.createTopicAndConsumber(oc, topicName)
g.By("create log producer")
oc.SetupProject()
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
// avoid hitting https://issues.redhat.com/browse/LOG-3025, set replicas to 3
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile, "-p", "REPLICAS=3").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("forward logs to Kafkas")
clf := clusterlogforwarder{
name: "clf-48141",
namespace: amqi1NS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "clf-kafka-multi-brokers.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: false,
serviceAccountName: "clf-" + getRandomString(),
}
defer clf.delete(oc)
brokers, _ := json.Marshal([]string{"tls://" + amqi1.service, "tls://" + amqi2.service})
clf.create(oc, "TOPIC="+topicName, "BROKERS="+string(brokers), "NAMESPACE_PATTERN="+appProj)
g.By("check data in the first broker")
amqi1logs, _ := getDataFromKafkaConsumerPod(oc, amqi1.namespace, amqi1ConsumerPodName)
o.Expect(len(amqi1logs) > 0).Should(o.BeTrue(), "Can not fetch any logs from broker1-consumer")
g.By("check data in the second broker")
amqi2logs, _ := getDataFromKafkaConsumerPod(oc, amqi2.namespace, amqi2ConsumerPodName)
o.Expect(len(amqi2logs) > 0).Should(o.BeTrue(), "Can not fetch any logs from broker2-consumer")
})
g.It("Author:ikanse-CPaasrunOnly-High-61549-Collector-External Kafka output complies with the tlsSecurityProfile configuration.[Slow][Disruptive]", func() {
g.By("Configure the global tlsSecurityProfile to use custom profile")
ogTLS, er := oc.AsAdmin().WithoutNamespace().Run("get").Args("apiserver/cluster", "-o", "jsonpath={.spec.tlsSecurityProfile}").Output()
o.Expect(er).NotTo(o.HaveOccurred())
if ogTLS == "" {
ogTLS = "null"
}
ogPatch := fmt.Sprintf(`[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": %s}]`, ogTLS)
defer func() {
oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", ogPatch).Execute()
waitForOperatorsRunning(oc)
}()
patch := `[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": {"custom":{"ciphers":["ECDHE-ECDSA-CHACHA20-POLY1305","ECDHE-RSA-CHACHA20-POLY1305","ECDHE-RSA-AES128-GCM-SHA256","ECDHE-ECDSA-AES128-GCM-SHA256"],"minTLSVersion":"VersionTLS12"},"type":"Custom"}}]`
er = oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", patch).Execute()
o.Expect(er).NotTo(o.HaveOccurred())
g.By("Make sure that all the Cluster Operators are in healthy state before progressing.")
waitForOperatorsRunning(oc)
g.By("Deploy zookeeper")
kafka := kafka{
namespace: loggingNS,
kafkasvcName: "kafka",
zoosvcName: "zookeeper",
authtype: "sasl-ssl",
pipelineSecret: "vector-kafka",
collectorType: "vector",
loggingNS: loggingNS,
}
defer kafka.removeZookeeper(oc)
kafka.deployZookeeper(oc)
g.By("Deploy kafka")
defer kafka.removeKafka(oc)
kafka.deployKafka(oc)
kafkaEndpoint := "tls://" + kafka.kafkasvcName + "." + kafka.namespace + ".svc.cluster.local:9093/clo-topic"
g.By("Create log producer")
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create clusterlogforwarder")
clf := clusterlogforwarder{
name: "clf-61549",
namespace: loggingNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "clf-kafka-with-auth.yaml"),
secretName: kafka.pipelineSecret,
waitForPodReady: true,
collectApplicationLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "URL="+kafkaEndpoint, "TLS_SECRET_NAME="+clf.secretName)
g.By("The Kafka sink in Vector config must use the Custom tlsSecurityProfile")
searchString := `[sinks.output_kafka_app.tls]
enabled = true
min_tls_version = "VersionTLS12"
ciphersuites = "ECDHE-ECDSA-CHACHA20-POLY1305,ECDHE-RSA-CHACHA20-POLY1305,ECDHE-RSA-AES128-GCM-SHA256,ECDHE-ECDSA-AES128-GCM-SHA256"
key_file = "/var/run/ocp-collector/secrets/vector-kafka/tls.key"
crt_file = "/var/run/ocp-collector/secrets/vector-kafka/tls.crt"
ca_file = "/var/run/ocp-collector/secrets/vector-kafka/ca-bundle.crt"`
result, err := checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", searchString)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue(), "the configuration %s is not in vector.toml", searchString)
g.By("Check app logs in kafka consumer pod")
consumerPodPodName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", kafka.namespace, "-l", "component=kafka-consumer", "-o", "name").Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
appLogs, err := getDataFromKafkaByNamespace(oc, kafka.namespace, consumerPodPodName, appProj)
if err != nil {
return false, err
}
return len(appLogs) > 0, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("App logs are not found in %s/%s", kafka.namespace, consumerPodPodName))
g.By("Set Old tlsSecurityProfile for the External Kafka output.")
patch = `[{"op": "add", "path": "/spec/outputs/0/tls/securityProfile", "value": {"type": "Old"}}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
g.By("Deploy the log generator app")
oc.SetupProject()
appProj1 := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj1, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("The Kafka sink in Vector config must use the Old tlsSecurityProfile")
searchString = `[sinks.output_kafka_app.tls]
enabled = true
min_tls_version = "VersionTLS10"
ciphersuites = "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,ECDHE-ECDSA-AES128-GCM-SHA256,ECDHE-RSA-AES128-GCM-SHA256,ECDHE-ECDSA-AES256-GCM-SHA384,ECDHE-RSA-AES256-GCM-SHA384,ECDHE-ECDSA-CHACHA20-POLY1305,ECDHE-RSA-CHACHA20-POLY1305,DHE-RSA-AES128-GCM-SHA256,DHE-RSA-AES256-GCM-SHA384,DHE-RSA-CHACHA20-POLY1305,ECDHE-ECDSA-AES128-SHA256,ECDHE-RSA-AES128-SHA256,ECDHE-ECDSA-AES128-SHA,ECDHE-RSA-AES128-SHA,ECDHE-ECDSA-AES256-SHA384,ECDHE-RSA-AES256-SHA384,ECDHE-ECDSA-AES256-SHA,ECDHE-RSA-AES256-SHA,DHE-RSA-AES128-SHA256,DHE-RSA-AES256-SHA256,AES128-GCM-SHA256,AES256-GCM-SHA384,AES128-SHA256,AES256-SHA256,AES128-SHA,AES256-SHA,DES-CBC3-SHA"
key_file = "/var/run/ocp-collector/secrets/vector-kafka/tls.key"
crt_file = "/var/run/ocp-collector/secrets/vector-kafka/tls.crt"
ca_file = "/var/run/ocp-collector/secrets/vector-kafka/ca-bundle.crt"`
result, err = checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", searchString)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue(), "the configuration %s is not in vector.toml", searchString)
g.By("Check for errors in collector pod logs.")
e2e.Logf("Wait for a minute before the collector logs are generated.")
time.Sleep(60 * time.Second)
collectorLogs, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", clf.namespace, "--selector=app.kubernetes.io/component=collector").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(collectorLogs, "Error trying to connect")).ShouldNot(o.BeTrue(), "Unable to connect to the external Kafka server.")
g.By("Check app logs in kafka consumer pod")
consumerPodPodName, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", kafka.namespace, "-l", "component=kafka-consumer", "-o", "name").Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
appLogs, err := getDataFromKafkaByNamespace(oc, kafka.namespace, consumerPodPodName, appProj1)
if err != nil {
return false, err
}
return len(appLogs) > 0, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("App logs are not found in %s/%s", kafka.namespace, consumerPodPodName))
})
})
})
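Every case in the file above verifies delivery with the same polling idiom: wrap a "did the consumer see logs yet" check in wait.PollUntilContextTimeout so the assertion tolerates the delay before records arrive. The standalone sketch below shows just that idiom; fetchLogs is a hypothetical stand-in for helpers such as getDataFromKafkaByNamespace and is not a real function from this repository.

// Minimal sketch of the polling pattern; requires k8s.io/apimachinery in the module.
package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// Hypothetical data source standing in for the kafka consumer helpers.
	fetchLogs := func() ([]string, error) { return []string{"log-line"}, nil }

	err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 180*time.Second, true,
		func(context.Context) (bool, error) {
			logs, err := fetchLogs()
			if err != nil {
				return false, err
			}
			// Done once at least one record has been observed.
			return len(logs) > 0, nil
		})
	fmt.Println("logs arrived:", err == nil)
}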
|
package logging
| ||||
test case
|
openshift/openshift-tests-private
|
8fa8a554-bf35-47c2-a203-2ad51344b743
|
Author:ikanse-CPaasrunOnly-Medium-49369-Vector Forward logs to kafka topic via Mutual Chained certificates
|
['"context"', '"encoding/json"', '"fmt"', '"path/filepath"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_kafka.go
|
g.It("Author:ikanse-CPaasrunOnly-Medium-49369-Vector Forward logs to kafka topic via Mutual Chained certificates", func() {
g.By("Create log producer")
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
kafkaNS := "openshift-kafka-" + getRandomString()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("project", kafkaNS, "--wait=false").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", kafkaNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
kafka := kafka{
namespace: kafkaNS,
kafkasvcName: "kafka",
zoosvcName: "zookeeper",
authtype: "plaintext-ssl",
pipelineSecret: "kafka-vector",
collectorType: "vector",
loggingNS: appProj,
}
g.By("Deploy zookeeper")
defer kafka.removeZookeeper(oc)
kafka.deployZookeeper(oc)
g.By("Deploy kafka")
defer kafka.removeKafka(oc)
kafka.deployKafka(oc)
kafkaEndpoint := "tls://" + kafka.kafkasvcName + "." + kafka.namespace + ".svc.cluster.local:9093/clo-topic"
g.By("Create clusterlogforwarder")
clf := clusterlogforwarder{
name: "clf-49369",
namespace: appProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "clf-kafka-no-auth.yaml"),
secretName: kafka.pipelineSecret,
waitForPodReady: true,
collectApplicationLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "URL="+kafkaEndpoint)
g.By("Check app logs in kafka consumer pod")
consumerPodPodName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", kafka.namespace, "-l", "component=kafka-consumer", "-o", "name").Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
appLogs, err := getDataFromKafkaByNamespace(oc, kafka.namespace, consumerPodPodName, appProj)
if err != nil {
return false, err
}
return len(appLogs) > 0, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("App logs are not found in %s/%s", kafka.namespace, consumerPodPodName))
})
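The kafka endpoint above packs the scheme, bootstrap service address and topic into a single URL. As a small illustrative sketch (an assumption about how such a string decomposes, not code from the forwarder), net/url splits it cleanly; the namespace segment is a placeholder because the test randomizes it.

package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	// Placeholder namespace; the tests generate "openshift-kafka-<random>".
	endpoint := "tls://kafka.openshift-kafka-x.svc.cluster.local:9093/clo-topic"
	u, err := url.Parse(endpoint)
	if err != nil {
		panic(err)
	}
	fmt.Println("scheme:", u.Scheme)                       // tls
	fmt.Println("broker:", u.Host)                         // kafka.openshift-kafka-x.svc.cluster.local:9093
	fmt.Println("topic:", strings.TrimPrefix(u.Path, "/")) // clo-topic
}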
| |||||
test case
|
openshift/openshift-tests-private
|
0d8e1ac2-4c89-4854-8752-32b1791f5c4e
|
Author:ikanse-CPaasrunOnly-Medium-52420-Vector Forward logs to kafka using SASL plaintext
|
['"context"', '"encoding/json"', '"fmt"', '"path/filepath"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_kafka.go
|
g.It("Author:ikanse-CPaasrunOnly-Medium-52420-Vector Forward logs to kafka using SASL plaintext", func() {
g.By("Create log producer")
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
kafkaNS := "openshift-kafka-" + getRandomString()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("project", kafkaNS, "--wait=false").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", kafkaNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploy zookeeper")
kafka := kafka{
namespace: kafkaNS,
kafkasvcName: "kafka",
zoosvcName: "zookeeper",
authtype: "sasl-plaintext",
pipelineSecret: "vector-kafka",
collectorType: "vector",
loggingNS: appProj,
}
defer kafka.removeZookeeper(oc)
kafka.deployZookeeper(oc)
g.By("Deploy kafka")
defer kafka.removeKafka(oc)
kafka.deployKafka(oc)
kafkaEndpoint := "http://" + kafka.kafkasvcName + "." + kafka.namespace + ".svc.cluster.local:9092/clo-topic"
g.By("Create clusterlogforwarder")
clf := clusterlogforwarder{
name: "clf-52420",
namespace: appProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "clf-kafka-with-auth.yaml"),
secretName: kafka.pipelineSecret,
waitForPodReady: true,
collectApplicationLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "URL="+kafkaEndpoint)
// Remove tls configuration from CLF as it is not required for this case
patch := `[{"op": "remove", "path": "/spec/outputs/0/tls"}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
g.By("Check app logs in kafka consumer pod")
consumerPodPodName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", kafka.namespace, "-l", "component=kafka-consumer", "-o", "name").Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
appLogs, err := getDataFromKafkaByNamespace(oc, kafka.namespace, consumerPodPodName, appProj)
if err != nil {
return false, err
}
return len(appLogs) > 0, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("App logs are not found in %s/%s", kafka.namespace, consumerPodPodName))
})
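The CLF updates in these cases pass raw JSON-patch strings to clf.update. As an optional, hedged alternative (a sketch, not how the suite does it), the same payload can be built with encoding/json to avoid quoting mistakes; the example reproduces the tls-removal patch used above.

package main

import (
	"encoding/json"
	"fmt"
)

// patchOp models a single RFC 6902 JSON-patch operation.
type patchOp struct {
	Op    string `json:"op"`
	Path  string `json:"path"`
	Value any    `json:"value,omitempty"`
}

func main() {
	patch, err := json.Marshal([]patchOp{{Op: "remove", Path: "/spec/outputs/0/tls"}})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patch)) // [{"op":"remove","path":"/spec/outputs/0/tls"}]
}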
| |||||
test case
|
openshift/openshift-tests-private
|
79d1446b-4ebe-439c-a8f0-8751ac86bdf9
|
Author:anli-CPaasrunOnly-WRS-Critical-68312-VA-IAC.03-Forward to Kafka using SSL-SASL_SCRAM auth
|
['"encoding/json"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_kafka.go
|
g.It("Author:anli-CPaasrunOnly-WRS-Critical-68312-VA-IAC.03-Forward to Kafka using SSL-SASL_SCRAM auth", func() {
amqNS := oc.Namespace()
g.By("crete kafka instance")
amqi := amqInstance{
name: "my-cluster",
namespace: amqNS,
topicPrefix: "logging-topic",
instanceType: "kafka-sasl-cluster",
}
defer amqi.destroy(oc)
amqi.deploy(oc)
topicName := "logging-topic-52496"
consumerPodName := amqi.createTopicAndConsumber(oc, topicName)
g.By("create log producer")
oc.SetupProject()
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("forward logs to Kafkas using ssl sasl scram-sha-512")
clf := clusterlogforwarder{
name: "clf-52496",
namespace: amqNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "clf-kafka-sasl-ssl.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: false,
collectInfrastructureLogs: false,
serviceAccountName: "clf-" + getRandomString(),
}
defer clf.delete(oc)
secretName := "secret-for-kafka-52420"
oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", secretName, "-n", clf.namespace, "--from-literal=username="+amqi.user, "--from-literal=password="+amqi.password, "--from-literal=ca-bundle.crt="+amqi.routeCA).Execute()
//To reduce the logs collected, we only collect app logs from appProj
//Note: all sasl and tls data come from the secret clf-to-amq with fixed key names -- user, password, ca
clf.create(oc, "URL=tls://"+amqi.route+"/"+topicName, "SECRET_NAME="+secretName, "NAMESPACE_PATTERN="+appProj)
g.By("verifiy the data are sent to kafka")
//application logs
logs, err := getDataFromKafkaConsumerPod(oc, amqi.namespace, consumerPodName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(logs) > 0).Should(o.BeTrue(), "Can not find any logs from kafka consumer pods")
})
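The oc command above creates the SASL-over-TLS secret with three keys. For illustration only, the same secret expressed as a corev1.Secret object is sketched below; the key names mirror the --from-literal flags in the test, while the values and the namespace are placeholders, and the suite itself does not build the secret this way.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	secret := corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: "secret-for-kafka-52420", Namespace: "my-clf-namespace"}, // namespace is a placeholder
		Type:       corev1.SecretTypeOpaque,
		StringData: map[string]string{
			"username":      "example-user",     // placeholder
			"password":      "example-password", // placeholder
			"ca-bundle.crt": "-----BEGIN CERTIFICATE-----\n...",
		},
	}
	fmt.Println(secret.Name, "with", len(secret.StringData), "keys")
}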
| |||||
test case
|
openshift/openshift-tests-private
|
d1858eb8-654b-4c13-87e4-38f90bc64452
|
Author:qitang-CPaasrunOnly-Medium-47036-Vector Forward logs to different AMQ Kafka topics[Slow]
|
['"context"', '"encoding/json"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_kafka.go
|
g.It("Author:qitang-CPaasrunOnly-Medium-47036-Vector Forward logs to different AMQ Kafka topics[Slow]", func() {
nodes, err := oc.AdminKubeClient().CoreV1().Nodes().List(context.Background(), metav1.ListOptions{LabelSelector: "kubernetes.io/os=linux,kubernetes.io/arch=amd64"})
if err != nil || len(nodes.Items) == 0 {
g.Skip("Skip for the cluster doesn't have amd64 node")
}
amqNS := oc.Namespace()
g.By("crete kafka instance")
amqi := amqInstance{
name: "my-cluster",
namespace: amqNS,
topicPrefix: "topic-logging",
instanceType: "kafka-sasl-cluster",
}
defer amqi.destroy(oc)
amqi.deploy(oc)
//topic names are fixed values in clf_kafka_multi_topics.yaml
consumerAppPodName := amqi.createTopicAndConsumber(oc, amqi.topicPrefix+"-app")
consumerInfraPodName := amqi.createTopicAndConsumber(oc, amqi.topicPrefix+"-infra")
consumerAuditPodName := amqi.createTopicAndConsumber(oc, amqi.topicPrefix+"-audit")
g.By("create log producer")
oc.SetupProject()
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("forward logs to Kafkas")
clf := clusterlogforwarder{
name: "clf-47036",
namespace: amqNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "clf-kafka-multi-topics.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
secretName := "secret-for-kafka-47036"
oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", secretName, "-n", clf.namespace, "--from-literal=username="+amqi.user, "--from-literal=password="+amqi.password, "--from-literal=ca-bundle.crt="+amqi.routeCA).Execute()
defer clf.delete(oc)
clf.create(oc, "BOOTSTRAP_SVC="+amqi.service, "NAMESPACE_PATTERN="+appProj, "APP_TOPIC="+amqi.topicPrefix+"-app", "INFRA_TOPIC="+amqi.topicPrefix+"-infra", "AUDIT_TOPIC="+amqi.topicPrefix+"-audit", "SECRET_NAME="+secretName)
g.By("check data in kafka")
//app logs
appLogs, err := getDataFromKafkaConsumerPod(oc, amqNS, consumerAppPodName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(appLogs) > 0).Should(o.BeTrue(), "Can not find any logs from topic-logging-app-consumer")
e2e.Logf("found app logs \n")
//infrastructure logs
infraLogs, err := getDataFromKafkaConsumerPod(oc, amqNS, consumerInfraPodName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(infraLogs) > 0).Should(o.BeTrue(), "Can not find any logs from topic-logging-infra-consumer")
o.Expect(infraLogs[0].LogType == "infrastructure").Should(o.BeTrue(), "Can not find infra logs in consumer pod")
e2e.Logf("found infra logs \n")
//audit logs
auditLogs, err := getDataFromKafkaConsumerPod(oc, amqNS, consumerAuditPodName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(auditLogs) > 0).Should(o.BeTrue(), "Can not find any logs from topic-logging-audit-consumer")
o.Expect(auditLogs[0].LogType == "audit").Should(o.BeTrue(), "Can not find audit logs in consumer pod")
e2e.Logf("found audit logs \n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
6e361a1f-b3af-4f5a-82f6-8da6ab5e9dac
|
Author:qitang-CPaasrunOnly-Medium-48141-Vector Forward logs to different Kafka brokers.[Slow]
|
['"context"', '"encoding/json"', '"path/filepath"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_kafka.go
|
g.It("Author:qitang-CPaasrunOnly-Medium-48141-Vector Forward logs to different Kafka brokers.[Slow]", func() {
nodes, err := oc.AdminKubeClient().CoreV1().Nodes().List(context.Background(), metav1.ListOptions{LabelSelector: "kubernetes.io/os=linux,kubernetes.io/arch=amd64"})
if err != nil || len(nodes.Items) == 0 {
g.Skip("Skip for the cluster doesn't have amd64 node")
}
//create the projects first, so OLM has more time to prepare the CSV in these namespaces
amqi1NS := oc.Namespace()
oc.SetupProject()
amqi2NS := oc.Namespace()
g.By("deploy AMQ kafka instance in two different namespaces")
// to avoid collecting kafka logs, deploy kafka in project openshift-*
// In general, we send data to brokers within one kafka cluster; for historical reasons, we use two clusters here. TODO: launch one cluster with more than one broker
topicName := "topic-logging"
amqi1 := amqInstance{
name: "my-cluster",
namespace: amqi1NS,
topicPrefix: topicName,
instanceType: "kafka-no-auth-cluster",
}
amqi2 := amqInstance{
name: "my-cluster",
namespace: amqi2NS,
topicPrefix: topicName,
instanceType: "kafka-no-auth-cluster",
}
defer amqi1.destroy(oc)
amqi1.deploy(oc)
amqi1ConsumerPodName := amqi1.createTopicAndConsumber(oc, topicName)
defer amqi2.destroy(oc)
amqi2.deploy(oc)
amqi2ConsumerPodName := amqi2.createTopicAndConsumber(oc, topicName)
g.By("create log producer")
oc.SetupProject()
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
// avoid hitting https://issues.redhat.com/browse/LOG-3025, set replicas to 3
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile, "-p", "REPLICAS=3").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("forward logs to Kafkas")
clf := clusterlogforwarder{
name: "clf-48141",
namespace: amqi1NS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "clf-kafka-multi-brokers.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: false,
serviceAccountName: "clf-" + getRandomString(),
}
defer clf.delete(oc)
brokers, _ := json.Marshal([]string{"tls://" + amqi1.service, "tls://" + amqi2.service})
clf.create(oc, "TOPIC="+topicName, "BROKERS="+string(brokers), "NAMESPACE_PATTERN="+appProj)
g.By("check data in the first broker")
amqi1logs, _ := getDataFromKafkaConsumerPod(oc, amqi1.namespace, amqi1ConsumerPodName)
o.Expect(len(amqi1logs) > 0).Should(o.BeTrue(), "Can not fetch any logs from broker1-consumer")
g.By("check data in the second broker")
amqi2logs, _ := getDataFromKafkaConsumerPod(oc, amqi2.namespace, amqi2ConsumerPodName)
o.Expect(len(amqi2logs) > 0).Should(o.BeTrue(), "Can not fetch any logs from broker2-consumer")
})
| |||||
test case
|
openshift/openshift-tests-private
|
90e75046-b54b-40be-8f9a-ba0b9cfd3b2d
|
Author:ikanse-CPaasrunOnly-High-61549-Collector-External Kafka output complies with the tlsSecurityProfile configuration.[Slow][Disruptive]
|
['"context"', '"encoding/json"', '"fmt"', '"path/filepath"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_kafka.go
|
g.It("Author:ikanse-CPaasrunOnly-High-61549-Collector-External Kafka output complies with the tlsSecurityProfile configuration.[Slow][Disruptive]", func() {
g.By("Configure the global tlsSecurityProfile to use custom profile")
ogTLS, er := oc.AsAdmin().WithoutNamespace().Run("get").Args("apiserver/cluster", "-o", "jsonpath={.spec.tlsSecurityProfile}").Output()
o.Expect(er).NotTo(o.HaveOccurred())
if ogTLS == "" {
ogTLS = "null"
}
ogPatch := fmt.Sprintf(`[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": %s}]`, ogTLS)
defer func() {
oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", ogPatch).Execute()
waitForOperatorsRunning(oc)
}()
patch := `[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": {"custom":{"ciphers":["ECDHE-ECDSA-CHACHA20-POLY1305","ECDHE-RSA-CHACHA20-POLY1305","ECDHE-RSA-AES128-GCM-SHA256","ECDHE-ECDSA-AES128-GCM-SHA256"],"minTLSVersion":"VersionTLS12"},"type":"Custom"}}]`
er = oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", patch).Execute()
o.Expect(er).NotTo(o.HaveOccurred())
g.By("Make sure that all the Cluster Operators are in healthy state before progressing.")
waitForOperatorsRunning(oc)
g.By("Deploy zookeeper")
kafka := kafka{
namespace: loggingNS,
kafkasvcName: "kafka",
zoosvcName: "zookeeper",
authtype: "sasl-ssl",
pipelineSecret: "vector-kafka",
collectorType: "vector",
loggingNS: loggingNS,
}
defer kafka.removeZookeeper(oc)
kafka.deployZookeeper(oc)
g.By("Deploy kafka")
defer kafka.removeKafka(oc)
kafka.deployKafka(oc)
kafkaEndpoint := "tls://" + kafka.kafkasvcName + "." + kafka.namespace + ".svc.cluster.local:9093/clo-topic"
g.By("Create log producer")
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create clusterlogforwarder")
clf := clusterlogforwarder{
name: "clf-61549",
namespace: loggingNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "clf-kafka-with-auth.yaml"),
secretName: kafka.pipelineSecret,
waitForPodReady: true,
collectApplicationLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "URL="+kafkaEndpoint, "TLS_SECRET_NAME="+clf.secretName)
g.By("The Kafka sink in Vector config must use the Custom tlsSecurityProfile")
searchString := `[sinks.output_kafka_app.tls]
enabled = true
min_tls_version = "VersionTLS12"
ciphersuites = "ECDHE-ECDSA-CHACHA20-POLY1305,ECDHE-RSA-CHACHA20-POLY1305,ECDHE-RSA-AES128-GCM-SHA256,ECDHE-ECDSA-AES128-GCM-SHA256"
key_file = "/var/run/ocp-collector/secrets/vector-kafka/tls.key"
crt_file = "/var/run/ocp-collector/secrets/vector-kafka/tls.crt"
ca_file = "/var/run/ocp-collector/secrets/vector-kafka/ca-bundle.crt"`
result, err := checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", searchString)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue(), "the configuration %s is not in vector.toml", searchString)
g.By("Check app logs in kafka consumer pod")
consumerPodName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", kafka.namespace, "-l", "component=kafka-consumer", "-o", "name").Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
appLogs, err := getDataFromKafkaByNamespace(oc, kafka.namespace, consumerPodName, appProj)
if err != nil {
return false, err
}
return len(appLogs) > 0, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("App logs are not found in %s/%s", kafka.namespace, consumerPodName))
g.By("Set Old tlsSecurityProfile for the External Kafka output.")
patch = `[{"op": "add", "path": "/spec/outputs/0/tls/securityProfile", "value": {"type": "Old"}}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
g.By("Deploy the log generator app")
oc.SetupProject()
appProj1 := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj1, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("The Kafka sink in Vector config must use the Old tlsSecurityProfile")
searchString = `[sinks.output_kafka_app.tls]
enabled = true
min_tls_version = "VersionTLS10"
ciphersuites = "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,ECDHE-ECDSA-AES128-GCM-SHA256,ECDHE-RSA-AES128-GCM-SHA256,ECDHE-ECDSA-AES256-GCM-SHA384,ECDHE-RSA-AES256-GCM-SHA384,ECDHE-ECDSA-CHACHA20-POLY1305,ECDHE-RSA-CHACHA20-POLY1305,DHE-RSA-AES128-GCM-SHA256,DHE-RSA-AES256-GCM-SHA384,DHE-RSA-CHACHA20-POLY1305,ECDHE-ECDSA-AES128-SHA256,ECDHE-RSA-AES128-SHA256,ECDHE-ECDSA-AES128-SHA,ECDHE-RSA-AES128-SHA,ECDHE-ECDSA-AES256-SHA384,ECDHE-RSA-AES256-SHA384,ECDHE-ECDSA-AES256-SHA,ECDHE-RSA-AES256-SHA,DHE-RSA-AES128-SHA256,DHE-RSA-AES256-SHA256,AES128-GCM-SHA256,AES256-GCM-SHA384,AES128-SHA256,AES256-SHA256,AES128-SHA,AES256-SHA,DES-CBC3-SHA"
key_file = "/var/run/ocp-collector/secrets/vector-kafka/tls.key"
crt_file = "/var/run/ocp-collector/secrets/vector-kafka/tls.crt"
ca_file = "/var/run/ocp-collector/secrets/vector-kafka/ca-bundle.crt"`
result, err = checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", searchString)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue(), "the configuration %s is not in vector.toml", searchString)
g.By("Check for errors in collector pod logs.")
e2e.Logf("Wait for a minute before the collector logs are generated.")
time.Sleep(60 * time.Second)
collectorLogs, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", clf.namespace, "--selector=app.kubernetes.io/component=collector").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(collectorLogs, "Error trying to connect")).ShouldNot(o.BeTrue(), "Unable to connect to the external Kafka server.")
g.By("Check app logs in kafka consumer pod")
consumerPodName, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", kafka.namespace, "-l", "component=kafka-consumer", "-o", "name").Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
appLogs, err := getDataFromKafkaByNamespace(oc, kafka.namespace, consumerPodName, appProj1)
if err != nil {
return false, err
}
return len(appLogs) > 0, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("App logs are not found in %s/%s", kafka.namespace, consumerPodName))
})
| |||||
test
|
openshift/openshift-tests-private
|
19e35d56-8784-4d4f-bd1e-5c3f1c149db5
|
vector_loki
|
import (
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"reflect"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"gopkg.in/yaml.v3"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
|
package logging
import (
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"reflect"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"gopkg.in/yaml.v3"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-openshift-logging] Logging NonPreRelease", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("vector-loki", exutil.KubeConfigPath())
loggingBaseDir string
)
g.Context("test forward logs to external loki log store", func() {
g.BeforeEach(func() {
loggingBaseDir = exutil.FixturePath("testdata", "logging")
CLO := SubscriptionObjects{
OperatorName: "cluster-logging-operator",
Namespace: cloNS,
PackageName: "cluster-logging",
Subscription: filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml"),
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
g.By("deploy CLO")
CLO.SubscribeOperator(oc)
oc.SetupProject()
})
g.It("CPaasrunOnly-Author:ikanse-High-47760-Vector Forward logs to Loki using default value via HTTP", func() {
var (
loglabeltemplate = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
g.By("Create project for app logs and deploy the log generator app")
appProj := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create Loki project and deploy Loki Server")
oc.SetupProject()
lokiNS := oc.Namespace()
loki := externalLoki{"loki-server", lokiNS}
defer loki.remove(oc)
loki.deployLoki(oc)
g.By("Create ClusterLogForwarder instance")
clf := clusterlogforwarder{
name: "clf-47760",
namespace: lokiNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "loki.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "URL=http://"+loki.name+"."+lokiNS+".svc:3100")
route := "http://" + getRouteAddress(oc, loki.namespace, loki.name)
lc := newLokiClient(route)
g.By("Searching for Application Logs in Loki")
appPodName, err := oc.AdminKubeClient().CoreV1().Pods(appProj).List(context.Background(), metav1.ListOptions{LabelSelector: "run=centos-logtest"})
o.Expect(err).NotTo(o.HaveOccurred())
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
appLogs, err := lc.searchByNamespace("", appProj)
if err != nil {
return false, err
}
if appLogs.Status == "success" && appLogs.Data.Stats.Summary.BytesProcessedPerSecond != 0 && appLogs.Data.Result[0].Stream.LogType == "application" && appLogs.Data.Result[0].Stream.KubernetesPodName == appPodName.Items[0].Name {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "failed searching for application logs in Loki")
e2e.Logf("Application Logs Query is a success")
g.By("Searching for Audit Logs in Loki")
auditLogs, err := lc.searchLogsInLoki("", "{log_type=\"audit\"}")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(auditLogs.Status).Should(o.Equal("success"))
o.Expect(auditLogs.Data.Result[0].Stream.LogType).Should(o.Equal("audit"))
o.Expect(auditLogs.Data.Stats.Summary.BytesProcessedPerSecond).ShouldNot(o.BeZero())
e2e.Logf("Audit Logs Query is a success")
g.By("Searching for Infra Logs in Loki")
infraLogs, err := lc.searchLogsInLoki("", "{log_type=\"infrastructure\"}")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(infraLogs.Status).Should(o.Equal("success"))
o.Expect(infraLogs.Data.Result[0].Stream.LogType).Should(o.Equal("infrastructure"))
o.Expect(infraLogs.Data.Stats.Summary.BytesProcessedPerSecond).ShouldNot(o.BeZero())
})
g.It("CPaasrunOnly-Author:ikanse-Medium-48922-Vector Forward logs to Loki using correct loki.tenantKey.kubernetes.namespace_name via HTTP", func() {
var (
loglabeltemplate = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
g.By("Create project for app logs and deploy the log generator app")
appProj := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create Loki project and deploy Loki Server")
oc.SetupProject()
lokiNS := oc.Namespace()
loki := externalLoki{"loki-server", lokiNS}
defer loki.remove(oc)
loki.deployLoki(oc)
tenantKey := "kubernetes_namespace_name"
g.By("Create ClusterLogForwarder")
clf := clusterlogforwarder{
name: "clf-48922",
namespace: lokiNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "loki.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "TENANT_KEY={.kubernetes.namespace_name||\"none\"}", "URL=http://"+loki.name+"."+lokiNS+".svc:3100", "INPUT_REFS=[\"application\"]")
g.By("Searching for Application Logs in Loki using tenantKey")
route := "http://" + getRouteAddress(oc, loki.namespace, loki.name)
lc := newLokiClient(route)
appPodName, err := oc.AdminKubeClient().CoreV1().Pods(appProj).List(context.Background(), metav1.ListOptions{LabelSelector: "run=centos-logtest"})
o.Expect(err).NotTo(o.HaveOccurred())
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
logs, err := lc.searchByKey("", tenantKey, appProj)
if err != nil {
return false, err
}
if logs.Status == "success" && logs.Data.Stats.Summary.BytesProcessedPerSecond != 0 && logs.Data.Result[0].Stream.LogType == "application" && logs.Data.Result[0].Stream.KubernetesPodName == appPodName.Items[0].Name {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "Failed searching for application logs in Loki")
e2e.Logf("Application Logs Query using namespace as tenantKey is a success")
})
g.It("CPaasrunOnly-Author:ikanse-Medium-48060-Medium-47801-Vector Forward logs to Loki using loki.labelKeys", func() {
var (
loglabeltemplate = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
g.By("Create project1 for app logs")
appProj1 := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj1, "-f", loglabeltemplate, "-p", "LABELS={\"negative\": \"centos-logtest\"}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create project for app logs and deploy the log generator app")
oc.SetupProject()
appProj2 := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj2, "-f", loglabeltemplate, "-p", "LABELS={\"positive\": \"centos-logtest\"}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create Loki project and deploy Loki Server")
oc.SetupProject()
lokiNS := oc.Namespace()
loki := externalLoki{"loki-server", lokiNS}
defer loki.remove(oc)
loki.deployLoki(oc)
labelKeys := "kubernetes_labels_positive"
podLabel := "centos-logtest"
g.By("Create ClusterLogForwarder")
clf := clusterlogforwarder{
name: "clf-47801",
namespace: lokiNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "loki.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "LABEL_KEYS=[\"kubernetes.labels.positive\"]", "URL=http://"+loki.name+"."+lokiNS+".svc:3100", "INPUT_REFS=[\"application\"]")
g.By("Searching for Application Logs in Loki using LabelKey - Postive match")
route := "http://" + getRouteAddress(oc, loki.namespace, loki.name)
lc := newLokiClient(route)
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
appLogs, err := lc.searchByKey("", labelKeys, podLabel)
if err != nil {
return false, err
}
if appLogs.Status == "success" && appLogs.Data.Stats.Summary.BytesProcessedPerSecond != 0 && appLogs.Data.Stats.Ingester.TotalLinesSent != 0 {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "Failed searching for application logs in Loki")
e2e.Logf("App logs found with matching LabelKey: %s and pod Label: %s", labelKeys, podLabel)
g.By("Searching for Application Logs in Loki using LabelKey - Negative match")
labelKeys = "kubernetes_labels_negative"
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
appLogs, err := lc.searchByKey("", labelKeys, podLabel)
if err != nil {
return false, err
}
if appLogs.Status == "success" && appLogs.Data.Stats.Store.TotalChunksDownloaded == 0 && appLogs.Data.Stats.Summary.BytesProcessedPerSecond == 0 {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "Failed searching for application logs in Loki")
e2e.Logf("No App logs found with matching LabelKey: %s and pod Label: %s", labelKeys, podLabel)
})
g.It("Author:ikanse-CPaasrunOnly-Medium-48925-Vector Forward logs to Loki using correct loki.tenantKey.kubernetes.container_name via HTTP", func() {
var (
loglabeltemplate = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
g.By("Create project for app logs and deploy the log generator app")
appProj := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create Loki project and deploy Loki Server")
oc.SetupProject()
lokiNS := oc.Namespace()
loki := externalLoki{"loki-server", lokiNS}
defer loki.remove(oc)
loki.deployLoki(oc)
tenantKey := "kubernetes_container_name"
g.By("Create ClusterLogForwarder instance")
clf := clusterlogforwarder{
name: "clf-48925",
namespace: lokiNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "loki.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "TENANT_KEY={.kubernetes.container_name||\"none\"}", "URL=http://"+loki.name+"."+lokiNS+".svc:3100", "INPUT_REFS=[\"application\"]")
g.By("Searching for Application Logs in Loki using tenantKey")
route := "http://" + getRouteAddress(oc, loki.namespace, loki.name)
lc := newLokiClient(route)
tenantKeyID := "logging-centos-logtest"
lc.waitForLogsAppearByKey("", tenantKey, tenantKeyID)
e2e.Logf("Application Logs Query using kubernetes.container_name as tenantKey is a success")
})
g.It("CPaasrunOnly-Author:qitang-High-71001-Collect or exclude logs by container[Slow]", func() {
exutil.By("Create Loki project and deploy Loki Server")
lokiNS := oc.Namespace()
loki := externalLoki{
name: "loki-server",
namespace: lokiNS,
}
defer loki.remove(oc)
loki.deployLoki(oc)
exutil.By("Create ClusterLogForwarder")
clf := clusterlogforwarder{
name: "clf-71001",
namespace: lokiNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "loki.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "TENANT_KEY={.kubernetes.namespace_name||\"none\"}", "URL=http://"+loki.name+"."+lokiNS+".svc:3100", "INPUT_REFS=[\"application\"]")
patch := `[{"op": "add", "path": "/spec/inputs", "value": [{"name": "new-app", "type": "application", "application": {"excludes": [{"container":"exclude*"}]}}]},{"op": "replace", "path": "/spec/pipelines/0/inputRefs", "value": ["new-app"]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
exutil.By("Create projects for app logs and deploy the log generators")
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
multiContainerJSONLog := filepath.Join(loggingBaseDir, "generatelog", "multi_container_json_log_template.yaml")
oc.SetupProject()
ns := oc.Namespace()
containerNames := []string{
"logging-71001-include",
"exclude-logging-logs",
"fake-kube-proxy",
}
for _, name := range containerNames {
err := oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", ns, "-p", "CONTAINER="+name, "-p", "CONFIGMAP="+name, "-p", "REPLICATIONCONTROLLER="+name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
err := oc.WithoutNamespace().Run("new-app").Args("-f", multiContainerJSONLog, "-n", ns, "-p", "CONTAINER=multiple-containers").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check logs in Loki, logs from containers/excludes* shouldn't be collected")
route := "http://" + getRouteAddress(oc, loki.namespace, loki.name)
lc := newLokiClient(route)
lc.waitForLogsAppearByProject("", ns)
for _, container := range []string{"logging-71001-include", "fake-kube-proxy", "multiple-containers-0", "multiple-containers-1", "multiple-containers-2"} {
lc.waitForLogsAppearByKey("", "kubernetes_container_name", container)
}
for _, q := range []string{`{kubernetes_container_name=~"exclude.+"}`, `{kubernetes_namespace_name=~"openshift.+"}`} {
log, err := lc.searchLogsInLoki("", q)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue(), "find logs with query "+q+", this is not expected")
}
exutil.By("Update CLF to exclude all containers")
patch = `[{"op": "replace", "path": "/spec/inputs/0/application/excludes", "value": [{"container":"*"}]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
exutil.By("Check logs in Loki, no logs collected")
_ = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "-n", loki.namespace, "-l", "component=loki,appname=loki-server").Execute()
WaitForDeploymentPodsToBeReady(oc, loki.namespace, loki.name)
err = lc.waitForLogsAppearByQuery("", `{kubernetes_namespace_name=~".+"}`)
exutil.AssertWaitPollWithErr(err, "no container logs should be collected")
exutil.By("Update CLF to include/exclude containers")
patch = `[{"op": "replace", "path": "/spec/inputs/0/application/excludes", "value": [{"container":"exclude*"}]},{"op": "add", "path": "/spec/inputs/0/application/includes", "value": [{"container":"multiple-containers-0"},{"container":"*oxy"},{"container":"*log*"}]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
exutil.By("Check logs in Loki, only logs from containers multiple-containers-0, logging-71001-include and fake-kube-proxy should be collected")
_ = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "-n", loki.namespace, "-l", "component=loki,appname=loki-server").Execute()
WaitForDeploymentPodsToBeReady(oc, loki.namespace, loki.name)
lc.waitForLogsAppearByProject("", ns)
for _, container := range []string{"logging-71001-include", "fake-kube-proxy", "multiple-containers-0"} {
lc.waitForLogsAppearByKey("", "kubernetes_container_name", container)
}
for _, q := range []string{`{kubernetes_container_name=~"exclude.+"}`, `{kubernetes_namespace_name=~"openshift.+"}`, `{kubernetes_container_name=~"multiple-containers-1|multiple-containers-2"}`} {
log, err := lc.searchLogsInLoki("", q)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue(), "find logs with query "+q+", this is not expected")
}
exutil.By("Update CLF to include all application containers")
patch = `[{"op": "remove", "path": "/spec/inputs/0/application/excludes"},{"op": "replace", "path": "/spec/inputs/0/application/includes", "value": [{"container":"*"}]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
exutil.By("Check logs in Loki, only logs application projects should be collected")
_ = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "-n", loki.namespace, "-l", "component=loki,appname=loki-server").Execute()
WaitForDeploymentPodsToBeReady(oc, loki.namespace, loki.name)
lc.waitForLogsAppearByProject("", ns)
for _, container := range []string{"logging-71001-include", "fake-kube-proxy", "exclude-logging-logs", "multiple-containers-0", "multiple-containers-1", "multiple-containers-2"} {
lc.waitForLogsAppearByKey("", "kubernetes_container_name", container)
}
err = lc.waitForLogsAppearByQuery("", `{kubernetes_namespace_name=~"openshift.+"}`)
exutil.AssertWaitPollWithErr(err, "container logs from infra projects should not be collected")
})
})
})
var _ = g.Describe("[sig-openshift-logging] Logging NonPreRelease", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("vector-lokistack", exutil.KubeConfigPath())
loggingBaseDir, s, sc string
)
g.Context("test forward logs to lokistack with vector", func() {
g.BeforeEach(func() {
s = getStorageType(oc)
if len(s) == 0 {
g.Skip("Current cluster doesn't have a proper object storage for this test!")
}
sc, _ = getStorageClassName(oc)
if len(sc) == 0 {
g.Skip("The cluster doesn't have a storage class for this test!")
}
if !validateInfraForLoki(oc) {
g.Skip("Current platform not supported!")
}
loggingBaseDir = exutil.FixturePath("testdata", "logging")
subTemplate := filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml")
CLO := SubscriptionObjects{
OperatorName: "cluster-logging-operator",
Namespace: cloNS,
PackageName: "cluster-logging",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
LO := SubscriptionObjects{
OperatorName: "loki-operator-controller-manager",
Namespace: loNS,
PackageName: "loki-operator",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
g.By("deploy CLO and LO")
CLO.SubscribeOperator(oc)
LO.SubscribeOperator(oc)
oc.SetupProject()
})
// author [email protected]
g.It("Author:qitang-CPaasrunOnly-ConnectedOnly-Medium-48646-Medium-49486-Deploy lokistack under different namespace and Vector Forward logs to LokiStack using CLF with gateway[Serial]", func() {
var (
jsonLogFile = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
appProj := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("deploy loki stack")
oc.SetupProject()
lokiNS := oc.Namespace()
ls := lokiStack{
name: "loki-49486",
namespace: lokiNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-49486",
storageClass: sc,
bucketName: "logging-loki-49486-" + getInfrastructureName(oc),
template: filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml"),
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
g.By("create clusterlogforwarder/instance")
lokiGatewaySVC := ls.name + "-gateway-http." + ls.namespace + ".svc:8080"
clf := clusterlogforwarder{
name: "clf-48646",
namespace: loggingNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack_gateway_https_secret.yaml"),
serviceAccountName: "logcollector-48646",
secretName: "lokistack-gateway-48646",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "GATEWAY_SVC="+lokiGatewaySVC)
//check logs in loki stack
g.By("check logs in loki")
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
lc := newLokiClient(route).withToken(bearerToken).retry(5)
for _, logType := range []string{"application", "infrastructure", "audit"} {
lc.waitForLogsAppearByKey(logType, "log_type", logType)
}
lc.waitForLogsAppearByProject("application", appProj)
})
g.It("Author:kbharti-CPaasrunOnly-ConnectedOnly-Medium-54663-Medium-48628-unique cluster identifier in all type of the log record and Expose Loki metrics to Prometheus[Serial]", func() {
var (
jsonLogFile = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
appProj := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploying LokiStack CR for 1x.demo tshirt size")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "loki-54663",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-54663",
storageClass: sc,
bucketName: "logging-loki-54663" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-54663",
namespace: loggingNS,
serviceAccountName: "logcollector-54663",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-54663",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
g.By("checking app, infra and audit logs in loki")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
for _, logType := range []string{"application", "infrastructure", "audit"} {
lc.waitForLogsAppearByKey(logType, "log_type", logType)
}
lc.waitForLogsAppearByProject("application", appProj)
g.By("checking if the unique cluster identifier is added to the log records")
clusterID, err := getClusterID(oc)
o.Expect(err).NotTo(o.HaveOccurred())
for _, logType := range []string{"application", "infrastructure", "audit"} {
logs, err := lc.searchByKey(logType, "log_type", logType)
o.Expect(err).NotTo(o.HaveOccurred())
extractedLogs := extractLogEntities(logs)
for _, log := range extractedLogs {
o.Expect(log.OpenShift.ClusterID == clusterID).Should(o.BeTrue())
}
e2e.Logf("Find cluster_id in %s logs", logType)
}
svcs, err := oc.AdminKubeClient().CoreV1().Services(ls.namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "app.kubernetes.io/created-by=lokistack-controller"})
o.Expect(err).NotTo(o.HaveOccurred())
g.By("query metrics in prometheus")
prometheusToken := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
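// Every service created by the lokistack-controller, except the grpc and ring endpoints, should show up as a scraped Prometheus job.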
for _, svc := range svcs.Items {
if !strings.Contains(svc.Name, "grpc") && !strings.Contains(svc.Name, "ring") {
checkMetric(oc, prometheusToken, "{job=\""+svc.Name+"\"}", 3)
}
}
for _, metric := range []string{"loki_boltdb_shipper_compactor_running", "loki_distributor_bytes_received_total", "loki_inflight_requests", "workqueue_work_duration_seconds_bucket{namespace=\"" + loNS + "\", job=\"loki-operator-controller-manager-metrics-service\"}", "loki_build_info", "loki_ingester_streams_created_total"} {
checkMetric(oc, prometheusToken, metric, 3)
}
})
g.It("CPaasrunOnly-ConnectedOnly-Author:kbharti-High-57063-Forward app logs to Loki with namespace selectors (vector)[Serial]", func() {
g.By("Creating 2 applications..")
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
appProj1 := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj1, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
appProj2 := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj2, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploying LokiStack CR for 1x.demo tshirt size")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "loki-57063",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-57063",
storageClass: sc,
bucketName: "logging-loki-57063-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-57063",
namespace: loggingNS,
serviceAccountName: "logcollector-57063",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-57063",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace, `INPUT_REFS=["infrastructure", "audit"]`)
patch := `[{"op": "add", "path": "/spec/inputs", "value": [{"name": "new-app", "type": "application", "application": {"includes": [{"namespace":"` + appProj2 + `"}]}}]}, {"op": "add", "path": "/spec/pipelines/0/inputRefs/-", "value": "new-app"}]`
clf.update(oc, "", patch, "--type=json")
clf.waitForCollectorPodsReady(oc)
g.By("checking infra and audit logs in loki")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
for _, logType := range []string{"infrastructure", "audit"} {
lc.waitForLogsAppearByKey(logType, "log_type", logType)
}
g.By("check logs in loki for custom app input..")
lc.waitForLogsAppearByProject("application", appProj2)
// no logs should be found for the app project that is not selected by the custom input in the CLF
appLog, err := lc.searchByNamespace("application", appProj1)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(appLog.Data.Result) == 0).Should(o.BeTrue())
})
//author [email protected]
g.It("Author:qitang-CPaasrunOnly-High-74945-New filter detectMultilineException test[Serial][Slow]", func() {
multilineLogTypes := map[string][]string{
"java": {javaExc, complexJavaExc, nestedJavaExc},
"go": {goExc, goOnGaeExc, goSignalExc, goHTTP},
"ruby": {rubyExc, railsExc},
"js": {clientJsExc, nodeJsExc, v8JsExc},
"csharp": {csharpAsyncExc, csharpNestedExc, csharpExc},
"python": {pythonExc},
"php": {phpOnGaeExc, phpExc},
"dart": {
dartAbstractClassErr,
dartArgumentErr,
dartAssertionErr,
dartAsyncErr,
dartConcurrentModificationErr,
dartDivideByZeroErr,
dartErr,
dartTypeErr,
dartExc,
dartUnsupportedErr,
dartUnimplementedErr,
dartOOMErr,
dartRangeErr,
dartReadStaticErr,
dartStackOverflowErr,
dartFallthroughErr,
dartFormatErr,
dartFormatWithCodeErr,
dartNoMethodErr,
dartNoMethodGlobalErr,
},
}
g.By("Deploying LokiStack CR for 1x.demo tshirt size")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "loki-74945",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-74945",
storageClass: sc,
bucketName: "logging-loki-74945-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err := ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-74945",
namespace: loggingNS,
serviceAccountName: "logcollector-74945",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-74945",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
patch := `[{"op": "add", "path": "/spec/filters", "value": [{"name": "detectmultiline", "type": "detectMultilineException"}]}, {"op": "add", "path": "/spec/pipelines/0/filterRefs", "value":["detectmultiline"]}]`
clf.update(oc, "", patch, "--type=json")
clf.waitForCollectorPodsReady(oc)
g.By("create some pods to generate multiline error")
multilineLogFile := filepath.Join(loggingBaseDir, "generatelog", "multiline-error-log.yaml")
for k := range multilineLogTypes {
ns := "multiline-log-" + k + "-74945"
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("project", ns, "--wait=false").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", ns, "deploy/multiline-log", "cm/multiline-log").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-n", ns, "-f", multilineLogFile, "-p", "LOG_TYPE="+k, "-p", "RATE=60.00").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
g.By("check data in Loki")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
for k, v := range multilineLogTypes {
g.By("check " + k + " logs\n")
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
appLogs, err := lc.searchByNamespace("application", "multiline-log-"+k+"-74945")
if err != nil {
return false, err
}
if appLogs.Status == "success" && len(appLogs.Data.Result) > 0 {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "can't find "+k+" logs")
for _, log := range v {
err = wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 120*time.Second, true, func(context.Context) (done bool, err error) {
dataInLoki, _ := lc.queryRange("application", "{kubernetes_namespace_name=\"multiline-log-"+k+"-74945\"}", len(v)*2, time.Now().Add(time.Duration(-2)*time.Hour), time.Now(), false)
lokiLogs := extractLogEntities(dataInLoki)
var messages []string
for _, lokiLog := range lokiLogs {
messages = append(messages, lokiLog.Message)
}
if len(messages) == 0 {
return false, nil
}
if !containSubstring(messages, log) {
e2e.Logf("can't find log\n%s, try next round", log)
return false, nil
}
return true, nil
})
if err != nil {
e2e.Failf("%s logs are not parsed", k)
}
}
e2e.Logf("\nfound %s logs in Loki\n", k)
}
})
g.It("CPaasrunOnly-ConnectedOnly-Author:qitang-Medium-71144-Collect or exclude infrastructure logs.[Serial][Slow]", func() {
exutil.By("Deploying LokiStack CR for 1x.demo tshirt size")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "loki-71144",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-71144",
storageClass: sc,
bucketName: "logging-loki-71144-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err := ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-71144",
namespace: loggingNS,
serviceAccountName: "logcollector-71144",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-71144",
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace, "INPUT_REFS=[\"infrastructure\"]")
exutil.By("checking infra logs in loki")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
err = lc.waitForLogsAppearByQuery("infrastructure", `{log_type="infrastructure",kubernetes_namespace_name=~".+"}`)
exutil.AssertWaitPollNoErr(err, "can't find infra container logs")
err = lc.waitForLogsAppearByQuery("infrastructure", `{log_type="infrastructure",kubernetes_namespace_name!~".+"}`)
exutil.AssertWaitPollNoErr(err, "can't find journal logs")
exutil.By("update CLF to only collect journal logs")
patch := `[{"op": "add", "path": "/spec/inputs", "value": [{"name": "selected-infra", "type": "infrastructure", "infrastructure": {"sources":["node"]}}]},{"op": "replace", "path": "/spec/pipelines/0/inputRefs", "value": ["selected-infra"]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 3 minutes for collector pods to send the cached records
time.Sleep(3 * time.Minute)
exutil.By("check data in lokistack, only journal logs are collected")
re, _ := lc.queryRange("infrastructure", `{ log_type="infrastructure", kubernetes_namespace_name!~".+" }`, 30, time.Now().Add(time.Duration(-2)*time.Minute), time.Now(), true)
o.Expect(len(re.Data.Result) > 0).Should(o.BeTrue())
re, _ = lc.queryRange("infrastructure", `{ log_type="infrastructure", kubernetes_namespace_name=~".+" }`, 30, time.Now().Add(time.Duration(-2)*time.Minute), time.Now(), true)
o.Expect(len(re.Data.Result) == 0).Should(o.BeTrue())
exutil.By("Update CLF to collect infra container logs")
patch = `[{"op": "replace", "path": "/spec/inputs/0/infrastructure/sources", "value": ["container"]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 3 minutes for collector pods to send the cached records
time.Sleep(3 * time.Minute)
exutil.By("check data in lokistack, only infra container logs are collected")
//check vector.toml, logs from logging infra pods should be excluded
searchString := `include_paths_glob_patterns = ["/var/log/pods/default_*/*/*.log", "/var/log/pods/kube-*_*/*/*.log", "/var/log/pods/kube_*/*/*.log", "/var/log/pods/openshift-*_*/*/*.log", "/var/log/pods/openshift_*/*/*.log"]
exclude_paths_glob_patterns = ["/var/log/pods/*/*/*.gz", "/var/log/pods/*/*/*.log.*", "/var/log/pods/*/*/*.tmp", "/var/log/pods/openshift-logging_*/gateway/*.log", "/var/log/pods/openshift-logging_*/loki*/*.log", "/var/log/pods/openshift-logging_*/opa/*.log", "/var/log/pods/openshift-logging_elasticsearch-*/*/*.log", "/var/log/pods/openshift-logging_kibana-*/*/*.log", "/var/log/pods/openshift-logging_logfilesmetricexporter-*/*/*.log"]`
result, err := checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", searchString)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue(), "the configuration %s is not in vector.toml", searchString)
re, _ = lc.queryRange("infrastructure", `{ log_type="infrastructure", kubernetes_namespace_name=~".+" }`, 30, time.Now().Add(time.Duration(-2)*time.Minute), time.Now(), true)
o.Expect(len(re.Data.Result) > 0).Should(o.BeTrue())
re, _ = lc.queryRange("infrastructure", `{ log_type="infrastructure", kubernetes_namespace_name!~".+" }`, 30, time.Now().Add(time.Duration(-2)*time.Minute), time.Now(), true)
o.Expect(len(re.Data.Result) == 0).Should(o.BeTrue())
})
// author [email protected]
g.It("CPaasrunOnly-ConnectedOnly-Author:qitang-High-71749-Drop logs based on test of fields and their values[Serial][Slow]", func() {
exutil.By("Deploying LokiStack CR for 1x.demo tshirt size")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "loki-71749",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-71749",
storageClass: sc,
bucketName: "logging-loki-71749-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err := ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "collector-71749",
namespace: loggingNS,
serviceAccountName: "logcollector-71749",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "71749.yaml"),
secretName: "lokistack-secret-71749",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
enableMonitoring: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
exutil.By("Create projects for app logs and deploy the log generators")
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
var namespaces []string
for i := 0; i < 3; i++ {
ns := "logging-project-71749-" + strconv.Itoa(i)
defer oc.DeleteSpecifiedNamespaceAsAdmin(ns)
oc.CreateSpecifiedNamespaceAsAdmin(ns)
namespaces = append(namespaces, ns)
}
err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", namespaces[0], "-p", "LABELS={\"test\": \"logging-71749-test\"}", "-p", "REPLICATIONCONTROLLER=logging-71749-test").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", namespaces[1], "-p", "LABELS={\"test\": \"logging-71749-test\", \"test.logging.io/logging.qe-test-label\": \"logging-71749-test\"}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", namespaces[2], "-p", "LABELS={\"test\": \"logging-71749-test\"}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check logs in loki")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
lc.waitForLogsAppearByKey("application", "log_type", "application")
lc.waitForLogsAppearByKey("infrastructure", "log_type", "infrastructure")
lc.waitForLogsAppearByKey("audit", "log_type", "audit")
// logs from openshift* projects are dropped
re, err := lc.searchLogsInLoki("infrastructure", `{ log_type="infrastructure", kubernetes_namespace_name=~"openshift.+" }`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(re.Data.Result) == 0).Should(o.BeTrue())
// only logs from namespaces[2] should be collected
app, err := lc.searchLogsInLoki("application", `{ log_type="application", kubernetes_namespace_name!~"`+namespaces[2]+`" }`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(app.Data.Result) == 0).Should(o.BeTrue())
//logs with a level of `error` and with a message that includes the word `error` are dropped
infra, err := lc.searchLogsInLoki("infrastructure", `{ log_type="infrastructure" } | level=~"error|err|eror", message=~".+error.+"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(infra.Data.Result) == 0).Should(o.BeTrue())
})
g.It("Author:anli-CPaasrunOnly-Critical-71049-Inputs.receiver.syslog to lokistack[Serial][Slow]", func() {
g.By("deploy loki stack")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "lokistack-71049",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-71049",
storageClass: sc,
bucketName: "logging-loki-71049-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err := ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("create a CLF to forward logs to lokistack")
clf := clusterlogforwarder{
name: "instance-71049",
namespace: loggingNS,
serviceAccountName: "logcollector-71049",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "71049.yaml"),
secretName: "lokistack-secret-71049",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
g.By("Create clusterlogforwarder as syslog clinet and forward logs to syslogserver")
sysCLF := clusterlogforwarder{
name: "instance",
namespace: oc.Namespace(),
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "rsyslog-serverAuth.yaml"),
secretName: "clf-syslog-secret",
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
g.By("Create secret for collector pods to connect to syslog server")
tmpDir := "/tmp/" + getRandomString()
defer exec.Command("rm", "-r", tmpDir).Output()
err = os.Mkdir(tmpDir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/"+clf.name+"-syslog", "-n", clf.namespace, "--confirm", "--to="+tmpDir).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", sysCLF.secretName, "-n", sysCLF.namespace, "--from-file=ca-bundle.crt="+tmpDir+"/tls.crt").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer sysCLF.delete(oc)
sysCLF.create(oc, "URL=tls://"+clf.name+"-syslog."+clf.namespace+".svc:6514")
//check logs in loki stack
g.By("check logs in loki")
defer removeClusterRoleFromServiceAccount(oc, sysCLF.namespace, "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, sysCLF.namespace, "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", sysCLF.namespace)
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
lc.waitForLogsAppearByKey("infrastructure", "log_type", "infrastructure")
sysLog, err := lc.searchLogsInLoki("infrastructure", `{log_type = "infrastructure"}|json|facility = "local0"`)
o.Expect(err).NotTo(o.HaveOccurred())
sysLogs := extractLogEntities(sysLog)
o.Expect(len(sysLogs) > 0).Should(o.BeTrue(), "can't find logs from syslog in lokistack")
})
g.It("Author:qitang-CPaasrunOnly-High-76727-Add stream info to data model viaq[Serial][Slow]", func() {
multilineLogs := []string{
javaExc, complexJavaExc, nestedJavaExc,
goExc, goOnGaeExc, goSignalExc, goHTTP,
rubyExc, railsExc,
clientJsExc, nodeJsExc, v8JsExc,
csharpAsyncExc, csharpNestedExc, csharpExc,
pythonExc,
phpOnGaeExc, phpExc,
dartAbstractClassErr,
dartArgumentErr,
dartAssertionErr,
dartAsyncErr,
dartConcurrentModificationErr,
dartDivideByZeroErr,
dartErr,
dartTypeErr,
dartExc,
dartUnsupportedErr,
dartUnimplementedErr,
dartOOMErr,
dartRangeErr,
dartReadStaticErr,
dartStackOverflowErr,
dartFallthroughErr,
dartFormatErr,
dartFormatWithCodeErr,
dartNoMethodErr,
dartNoMethodGlobalErr,
}
exutil.By("Deploying LokiStack CR for 1x.demo tshirt size")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "loki-76727",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-76727",
storageClass: sc,
bucketName: "logging-loki-76727-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err := ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("create a CLF to forward logs to lokistack")
clf := clusterlogforwarder{
name: "instance-76727",
namespace: loggingNS,
serviceAccountName: "logcollector-76727",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-76727",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
patch := `[{"op": "add", "path": "/spec/filters", "value": [{"name": "detectmultiline", "type": "detectMultilineException"}]}, {"op": "add", "path": "/spec/pipelines/0/filterRefs", "value":["detectmultiline"]}]`
clf.update(oc, "", patch, "--type=json")
clf.waitForCollectorPodsReady(oc)
exutil.By("create some pods to generate multiline errors")
multilineLogFile := filepath.Join(loggingBaseDir, "generatelog", "multiline-error-log.yaml")
ioStreams := []string{"stdout", "stderr"}
for _, ioStream := range ioStreams {
ns := "multiline-log-" + ioStream + "-76727"
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("project", ns, "--wait=false").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", ns, "deploy/multiline-log", "cm/multiline-log").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-n", ns, "-f", multilineLogFile, "-p", "OUT_STREAM="+ioStream, "-p", "RATE=60.00").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("check data in Loki")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
for _, ioStream := range ioStreams {
lc.waitForLogsAppearByProject("application", "multiline-log-"+ioStream+"-76727")
dataInLoki, _ := lc.searchByNamespace("application", "multiline-log-"+ioStream+"-76727")
lokiLog := extractLogEntities(dataInLoki)
for _, log := range lokiLog {
o.Expect(log.Kubernetes.ContainerIOStream == ioStream).Should(o.BeTrue(), `iostream is wrong, expected: `+ioStream+`, got: `+log.Kubernetes.ContainerIOStream)
o.Expect(containSubstring(multilineLogs, log.Message)).Should(o.BeTrue(), fmt.Sprintf("Parse multiline error failed, iostream: %s, message: \n%s", ioStream, log.Message))
}
}
})
g.It("Author:qitang-CPaasrunOnly-High-78380-Collector should collect logs from all log sources.[Serial]", func() {
exutil.By("Deploying LokiStack")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "loki-78380",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-78380",
storageClass: sc,
bucketName: "logging-loki-78380-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err := ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("create a CLF to forward to lokistack")
clf := clusterlogforwarder{
name: "clf-78380-" + getRandomString(),
namespace: loggingNS,
serviceAccountName: "clf-78380",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-78380",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
exutil.By("checking app, infra and audit logs in loki")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
lc.waitForLogsAppearByKey("audit", "log_type", "audit")
exutil.By("Check audit logs, should find logs from each directory")
// for OVN audit logs, it's covered in OCP-71143 and OCP-53995
for _, q := range []string{
`{log_type="audit"} | json | log_source="auditd"`,
`{log_type="audit"} | json | log_source="kubeAPI"`,
`{log_type="audit"} | json | log_source="openshiftAPI" | requestURI=~"/apis/route.openshift.io.+"`, //openshift-apiserver
`{log_type="audit"} | json | log_source="openshiftAPI" | requestURI=~"/apis/oauth.openshift.io/.+"`, //oauth-apiserver
`{log_type="audit"} | json | log_source="openshiftAPI" | requestURI=~"/oauth/authorize.+"`, //oauth-server
`{log_type="audit"} | json | log_source="openshiftAPI" | requestURI=~"/login/.+"`, //oauth-server
} {
err = lc.waitForLogsAppearByQuery("audit", q)
exutil.AssertWaitPollNoErr(err, "can't find log with query: "+q)
}
})
})
})
var _ = g.Describe("[sig-openshift-logging] Logging NonPreRelease", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("vector-loki-ext", exutil.KubeConfigPath())
loggingBaseDir string
)
g.Context("Test forward logs to external Grafana Loki log store", func() {
g.BeforeEach(func() {
loggingBaseDir = exutil.FixturePath("testdata", "logging")
CLO := SubscriptionObjects{
OperatorName: "cluster-logging-operator",
Namespace: cloNS,
PackageName: "cluster-logging",
Subscription: filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml"),
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
g.By("deploy CLO")
CLO.SubscribeOperator(oc)
oc.SetupProject()
})
g.It("Author:qitang-CPaasrunOnly-Critical-75298-Forward to Loki with default labelKeys", func() {
var (
loglabeltemplate = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
g.By("Fetch and set the Grafana Loki credentials")
lokiUsername, lokiPassword, err := getExtLokiSecret()
o.Expect(err).NotTo(o.HaveOccurred())
lokiURL := "https://logs-prod3.grafana.net"
g.By("Create project for app logs and deploy the log generator app")
appProj := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
clfNS := oc.Namespace()
g.By("Create secret with external Grafana Loki instance credentials")
sct := resource{"secret", "loki-client", clfNS}
defer sct.clear(oc)
_, err = oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args(sct.kind, "generic", sct.name, "-n", sct.namespace, "--from-literal=username="+lokiUsername, "--from-literal=password="+lokiPassword).Output()
o.Expect(err).NotTo(o.HaveOccurred())
sct.WaitForResourceToAppear(oc)
g.By("Create ClusterLogForwarder to forward logs to the external Loki instance")
clf := clusterlogforwarder{
name: "clf-75298",
namespace: clfNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "clf-external-loki-with-secret.yaml"),
secretName: sct.name,
waitForPodReady: true,
collectApplicationLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "LOKI_URL="+lokiURL, "INPUTREFS=[\"application\"]", `TUNING={"compression": "snappy", "deliveryMode": "AtLeastOnce", "maxWrite": "10M"}`)
exutil.By("check logs in grafana loki")
lc := newLokiClient(lokiURL).withBasicAuth(lokiUsername, lokiPassword).retry(5)
lc.waitForLogsAppearByProject("", appProj)
exutil.By("Check configurations in collector pods")
expectedConfigs := []string{
`compression = "snappy"`,
`[sinks.output_loki_server.batch]
max_bytes = 10000000`,
`[sinks.output_loki_server.buffer]
type = "disk"
when_full = "block"
max_size = 268435488`,
}
result, err := checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", expectedConfigs...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).Should(o.BeTrue())
})
g.It("CPaasrunOnly-Author:ikanse-Medium-48490-Vector Forward logs to Grafana Loki using HTTPS and existing loki.tenantKey kubernetes.labels.test", func() {
var (
loglabeltemplate = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
g.By("Fetch and set the Grafana Loki credentials")
lokiUsername, lokiPassword, err := getExtLokiSecret()
o.Expect(err).NotTo(o.HaveOccurred())
lokiURL := "https://logs-prod3.grafana.net"
g.By("Create project for app logs and deploy the log generator app")
appProj := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
clfNS := oc.Namespace()
g.By("Create secret with external Grafana Loki instance credentials")
sct := resource{"secret", "loki-client", clfNS}
defer sct.clear(oc)
_, err = oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args(sct.kind, "generic", sct.name, "-n", sct.namespace, "--from-literal=username="+lokiUsername, "--from-literal=password="+lokiPassword).Output()
o.Expect(err).NotTo(o.HaveOccurred())
sct.WaitForResourceToAppear(oc)
g.By("Create ClusterLogForwarder to forward logs to the external Loki instance with tenantKey kubernetes_labels.test")
clf := clusterlogforwarder{
name: "clf-48490",
namespace: clfNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "clf-external-loki-with-secret-tenantKey.yaml"),
secretName: sct.name,
waitForPodReady: true,
collectApplicationLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "LOKI_URL="+lokiURL, "TENANTKEY={.kubernetes.labels.test||\"none\"}")
g.By(fmt.Sprintf("Search for the %s project logs in Loki", appProj))
lc := newLokiClient(lokiURL).withBasicAuth(lokiUsername, lokiPassword).retry(5)
g.By("Searching for Application Logs in Loki")
appPodName, err := oc.AdminKubeClient().CoreV1().Pods(appProj).List(context.Background(), metav1.ListOptions{LabelSelector: "run=centos-logtest"})
o.Expect(err).NotTo(o.HaveOccurred())
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
appLogs, err := lc.searchByNamespace("", appProj)
if err != nil {
return false, err
}
if appLogs.Status == "success" && appLogs.Data.Stats.Summary.BytesProcessedPerSecond != 0 && len(appLogs.Data.Result) > 0 && appLogs.Data.Result[0].Stream.LogType == "application" && appLogs.Data.Result[0].Stream.KubernetesPodName == appPodName.Items[0].Name {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "failed searching for application logs in Loki")
e2e.Logf("Application Logs Query is a success")
})
g.It("CPaasrunOnly-Author:ikanse-Medium-48923-Vector Forward logs to Grafana Loki using HTTPS and existing loki.tenantKey kubernetes.namespace_name", func() {
var (
loglabeltemplate = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
g.By("Fetch and set the Grafana Loki credentials")
lokiUsername, lokiPassword, err := getExtLokiSecret()
o.Expect(err).NotTo(o.HaveOccurred())
lokiURL := "https://logs-prod3.grafana.net"
g.By("Create project for app logs and deploy the log generator app")
appProj := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
clfNS := oc.Namespace()
g.By("Create secret with external Grafana Loki instance credentials")
sct := resource{"secret", "loki-client", clfNS}
defer sct.clear(oc)
_, err = oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args(sct.kind, "generic", sct.name, "-n", sct.namespace, "--from-literal=username="+lokiUsername, "--from-literal=password="+lokiPassword).Output()
o.Expect(err).NotTo(o.HaveOccurred())
sct.WaitForResourceToAppear(oc)
g.By("Create ClusterLogForwarder to forward logs to the external Loki instance with tenantKey kubernetes_labels.test")
clf := clusterlogforwarder{
name: "clf-48923",
namespace: clfNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "clf-external-loki-with-secret-tenantKey.yaml"),
secretName: sct.name,
waitForPodReady: true,
collectApplicationLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "LOKI_URL="+lokiURL, "TENANTKEY={.kubernetes.namespace_name||\"none\"}")
g.By(fmt.Sprintf("Search for the %s project logs in Loki", appProj))
lc := newLokiClient(lokiURL).withBasicAuth(lokiUsername, lokiPassword).retry(5)
g.By("Searching for Application Logs in Loki")
appPodName, err := oc.AdminKubeClient().CoreV1().Pods(appProj).List(context.Background(), metav1.ListOptions{LabelSelector: "run=centos-logtest"})
o.Expect(err).NotTo(o.HaveOccurred())
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
appLogs, err := lc.searchByNamespace("", appProj)
if err != nil {
return false, err
}
if appLogs.Status == "success" && appLogs.Data.Stats.Summary.BytesProcessedPerSecond != 0 && len(appLogs.Data.Result) > 0 && appLogs.Data.Result[0].Stream.LogType == "application" && appLogs.Data.Result[0].Stream.KubernetesPodName == appPodName.Items[0].Name {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "failed searching for application logs in Loki")
e2e.Logf("Application Logs Query is a success")
})
g.It("CPaasrunOnly-Author:ikanse-High-62975-Collector connects to the remote output using the cipher defined in the tlsSecurityPrfoile [Slow][Disruptive]", func() {
g.By("Make sure that all the Cluster Operators are in healthy state before progressing.")
waitForOperatorsRunning(oc)
var (
loglabeltemplate = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
g.By("Fetch and set the Grafana Loki credentials")
lokiUsername, lokiPassword, err := getExtLokiSecret()
o.Expect(err).NotTo(o.HaveOccurred())
lokiURL := "https://logs-prod3.grafana.net"
g.By("Create project for app logs and deploy the log generator app")
appProj := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
clfNS := oc.Namespace()
g.By("Create secret with external Grafana Loki instance credentials")
sct := resource{"secret", "loki-client", clfNS}
defer sct.clear(oc)
_, err = oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args(sct.kind, "generic", sct.name, "-n", sct.namespace, "--from-literal=username="+lokiUsername, "--from-literal=password="+lokiPassword).Output()
o.Expect(err).NotTo(o.HaveOccurred())
sct.WaitForResourceToAppear(oc)
g.By("Create ClusterLogForwarder to forward logs to the external Loki instance")
clf := clusterlogforwarder{
name: "clf-62975",
namespace: clfNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "clf-external-loki-with-secret.yaml"),
secretName: sct.name,
collectApplicationLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "LOKI_URL="+lokiURL, "INPUTREFS=[\"application\"]")
patch := `[{"op": "add", "path": "/spec/outputs/0/tls", "value": {"securityProfile": {"type": "Custom", "custom": {"ciphers": ["TLS_AES_128_CCM_SHA256"], "minTLSVersion": "VersionTLS13"}}}}]`
clf.update(oc, "", patch, "--type=json")
clf.waitForCollectorPodsReady(oc)
g.By("The Loki sink in Vector config must use the Custom tlsSecurityProfile with ciphersuite TLS_AES_128_CCM_SHA256")
searchString := `[sinks.output_loki_server.tls]
min_tls_version = "VersionTLS13"
ciphersuites = "TLS_AES_128_CCM_SHA256"`
result, err := checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", searchString)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue(), "the configuration %s is not in vector.toml", searchString)
g.By("Check for errors in collector pod logs.")
err = wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 3*time.Minute, true, func(context.Context) (done bool, err error) {
collectorLogs, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", clf.namespace, "--selector=app.kubernetes.io/component=collector").Output()
if err != nil {
return false, nil
}
return strings.Contains(collectorLogs, "error trying to connect"), nil
})
exutil.AssertWaitPollNoErr(err, "Collector shouldn't connect to the external Loki server.")
g.By("Searching for Application Logs in Loki")
lc := newLokiClient(lokiURL).withBasicAuth(lokiUsername, lokiPassword).retry(5)
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
appLogs, err := lc.searchByNamespace("", appProj)
if err != nil {
return false, err
}
return appLogs.Status == "success" && len(appLogs.Data.Result) == 0, nil
})
exutil.AssertWaitPollNoErr(err, "Failed searching for application logs in Loki")
g.By("Set the Custom tlsSecurityProfile for Loki output")
patch = `[{"op": "replace", "path": "/spec/outputs/0/tls/securityProfile/custom/ciphers", "value": ["TLS_CHACHA20_POLY1305_SHA256"]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
g.By("The Loki sink in Vector config must use the Custom tlsSecurityProfile with ciphersuite TLS_CHACHA20_POLY1305_SHA256")
searchString = `[sinks.output_loki_server.tls]
min_tls_version = "VersionTLS13"
ciphersuites = "TLS_CHACHA20_POLY1305_SHA256"`
result, err = checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", searchString)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue(), "the configuration %s is not in vector.toml", searchString)
g.By("Check for errors in collector pod logs.")
err = wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 3*time.Minute, true, func(context.Context) (done bool, err error) {
collectorLogs, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", clf.namespace, "--selector=app.kubernetes.io/component=collector").Output()
if err != nil {
return false, nil
}
return !strings.Contains(collectorLogs, "error trying to connect"), nil
})
exutil.AssertWaitPollNoErr(err, "Unable to connect to the external Loki server.")
g.By("Searching for Application Logs in Loki")
lc.waitForLogsAppearByProject("", appProj)
})
g.It("CPaasrunOnly-Author:ikanse-Low-61476-Collector-External Loki output complies with the tlsSecurityProfile configuration.[Slow][Disruptive]", func() {
var (
loglabeltemplate = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
g.By("Configure the global tlsSecurityProfile to use Intermediate profile")
ogTLS, er := oc.AsAdmin().WithoutNamespace().Run("get").Args("apiserver/cluster", "-o", "jsonpath={.spec.tlsSecurityProfile}").Output()
o.Expect(er).NotTo(o.HaveOccurred())
if ogTLS == "" {
ogTLS = "null"
}
ogPatch := fmt.Sprintf(`[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": %s}]`, ogTLS)
defer func() {
oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", ogPatch).Execute()
waitForOperatorsRunning(oc)
}()
patch := `[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": {"intermediate":{},"type":"Intermediate"}}]`
er = oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", patch).Execute()
o.Expect(er).NotTo(o.HaveOccurred())
g.By("Make sure that all the Cluster Operators are in healthy state before progressing.")
waitForOperatorsRunning(oc)
g.By("Fetch and set the Grafana Loki credentials")
lokiUsername, lokiPassword, err := getExtLokiSecret()
o.Expect(err).NotTo(o.HaveOccurred())
lokiURL := "https://logs-prod3.grafana.net"
g.By("Create project for app logs and deploy the log generator app")
appProj := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
clfNS := oc.Namespace()
g.By("Create secret with external Grafana Loki instance credentials")
sct := resource{"secret", "loki-client", clfNS}
defer sct.clear(oc)
_, err = oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args(sct.kind, "generic", sct.name, "-n", sct.namespace, "--from-literal=username="+lokiUsername, "--from-literal=password="+lokiPassword).Output()
o.Expect(err).NotTo(o.HaveOccurred())
sct.WaitForResourceToAppear(oc)
g.By("Create ClusterLogForwarder to forward logs to the external Loki instance")
clf := clusterlogforwarder{
name: "clf-61476",
namespace: clfNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "clf-external-loki-with-secret.yaml"),
secretName: sct.name,
waitForPodReady: true,
collectApplicationLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
inputRefs := "[\"application\"]"
clf.create(oc, "LOKI_URL="+lokiURL, "INPUTREFS="+inputRefs)
g.By("The Loki sink in Vector config must use the intermediate tlsSecurityProfile")
searchString := `[sinks.output_loki_server.tls]
min_tls_version = "VersionTLS12"
ciphersuites = "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,ECDHE-ECDSA-AES128-GCM-SHA256,ECDHE-RSA-AES128-GCM-SHA256,ECDHE-ECDSA-AES256-GCM-SHA384,ECDHE-RSA-AES256-GCM-SHA384,ECDHE-ECDSA-CHACHA20-POLY1305,ECDHE-RSA-CHACHA20-POLY1305,DHE-RSA-AES128-GCM-SHA256,DHE-RSA-AES256-GCM-SHA384"`
result, err := checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", searchString)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue(), "the configuration %s is not in vector.toml", searchString)
g.By("Searching for Application Logs in Loki")
lc := newLokiClient(lokiURL).withBasicAuth(lokiUsername, lokiPassword).retry(5)
appPodName, err := oc.AdminKubeClient().CoreV1().Pods(appProj).List(context.Background(), metav1.ListOptions{LabelSelector: "run=centos-logtest"})
o.Expect(err).NotTo(o.HaveOccurred())
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
appLogs, err := lc.searchByNamespace("", appProj)
if err != nil {
return false, err
}
if appLogs.Status == "success" && appLogs.Data.Stats.Summary.BytesProcessedPerSecond != 0 && len(appLogs.Data.Result) > 0 && appLogs.Data.Result[0].Stream.LogType == "application" && appLogs.Data.Result[0].Stream.KubernetesPodName == appPodName.Items[0].Name {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "failed searching for application logs in Loki")
e2e.Logf("Application Logs Query is a success")
g.By("Set the Modern tlsSecurityProfile for Loki output")
patch = `[{"op": "add", "path": "/spec/outputs/0/tls", "value": {"securityProfile":{"type":"Modern"}}}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
g.By("Create project for app logs and deploy the log generator app")
oc.SetupProject()
appProj1 := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj1, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("The Loki sink in Vector config must use the Modern tlsSecurityProfile")
searchString = `[sinks.output_loki_server.tls]
min_tls_version = "VersionTLS13"
ciphersuites = "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256"`
result, err = checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", searchString)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue(), "the configuration %s is not in vector.toml", searchString)
g.By("Check for errors in collector pod logs.")
e2e.Logf("Wait for a minute before the collector logs are generated.")
time.Sleep(60 * time.Second)
collectorLogs, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", clf.namespace, "--selector=app.kubernetes.io/component=collector").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(collectorLogs, "Error trying to connect")).ShouldNot(o.BeTrue(), "Unable to connect to the external Loki server.")
g.By("Searching for Application Logs in Loki")
appPodName, err = oc.AdminKubeClient().CoreV1().Pods(appProj1).List(context.Background(), metav1.ListOptions{LabelSelector: "run=centos-logtest"})
o.Expect(err).NotTo(o.HaveOccurred())
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
appLogs, err := lc.searchByNamespace("", appProj1)
if err != nil {
return false, err
}
if appLogs.Status == "success" && appLogs.Data.Stats.Summary.BytesProcessedPerSecond != 0 && len(appLogs.Data.Result) > 0 && appLogs.Data.Result[0].Stream.LogType == "application" && appLogs.Data.Result[0].Stream.KubernetesPodName == appPodName.Items[0].Name {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "failed searching for application logs in Loki")
e2e.Logf("Application Logs Query is a success")
})
})
})
var _ = g.Describe("[sig-openshift-logging] Logging NonPreRelease", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("lokistack-tlssecurity", exutil.KubeConfigPath())
loggingBaseDir, s, sc string
)
g.Context("ClusterLogging LokiStack tlsSecurityProfile tests", func() {
g.BeforeEach(func() {
s = getStorageType(oc)
if len(s) == 0 {
g.Skip("Current cluster doesn't have a proper object storage for this test!")
}
sc, _ = getStorageClassName(oc)
if len(sc) == 0 {
g.Skip("The cluster doesn't have a storage class for this test!")
}
if !validateInfraForLoki(oc) {
g.Skip("Current platform not supported!")
}
loggingBaseDir = exutil.FixturePath("testdata", "logging")
subTemplate := filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml")
CLO := SubscriptionObjects{
OperatorName: "cluster-logging-operator",
Namespace: cloNS,
PackageName: "cluster-logging",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
LO := SubscriptionObjects{
OperatorName: "loki-operator-controller-manager",
Namespace: loNS,
PackageName: "loki-operator",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
g.By("deploy CLO and LO")
CLO.SubscribeOperator(oc)
LO.SubscribeOperator(oc)
oc.SetupProject()
})
g.It("CPaasrunOnly-ConnectedOnly-Author:ikanse-High-54523-LokiStack Cluster Logging comply with the intermediate TLS security profile when global API Server has no tlsSecurityProfile defined[Slow][Disruptive]", func() {
var (
jsonLogFile = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
appProj := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Remove any tlsSecurityProfile config")
ogTLS, er := oc.AsAdmin().WithoutNamespace().Run("get").Args("apiserver/cluster", "-o", "jsonpath={.spec.tlsSecurityProfile}").Output()
o.Expect(er).NotTo(o.HaveOccurred())
if ogTLS == "" {
ogTLS = "null"
}
ogPatch := fmt.Sprintf(`[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": %s}]`, ogTLS)
defer func() {
oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", ogPatch).Execute()
waitForOperatorsRunning(oc)
}()
patch := `[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": null}]`
er = oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", patch).Execute()
o.Expect(er).NotTo(o.HaveOccurred())
g.By("Make sure that all the Cluster Operators are in healthy state before progressing.")
waitForOperatorsRunning(oc)
g.By("Deploying LokiStack CR for 1x.demo tshirt size")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "loki-54523",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-54523",
storageClass: sc,
bucketName: "logging-loki-54523-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-54523",
namespace: loggingNS,
serviceAccountName: "logcollector-54523",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-54523",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
g.By("checking app, audit and infra logs in loki")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
for _, logType := range []string{"application", "infrastructure", "audit"} {
lc.waitForLogsAppearByKey(logType, "log_type", logType)
}
lc.waitForLogsAppearByProject("application", appProj)
g.By("Check that the LokiStack gateway is using the Intermediate tlsSecurityProfile")
server := fmt.Sprintf("%s-gateway-http:8081", ls.name)
checkTLSProfile(oc, "intermediate", "RSA", server, "/run/secrets/kubernetes.io/serviceaccount/service-ca.crt", ls.namespace, 2)
g.By("Check the LokiStack config for the intermediate TLS security profile ciphers and TLS version")
dirname := "/tmp/" + oc.Namespace() + "-lkcnf"
defer os.RemoveAll(dirname)
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("cm/"+ls.name+"-config", "-n", ls.namespace, "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
lokiStackConf, err := os.ReadFile(dirname + "/config.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
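// The intermediate-profile cipher suites and minimum TLS version are expected to appear 8 times in
// the rendered config.yaml, presumably once per TLS-enabled server section of the 1x.demo deployment.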
expectedConfigs := []string{"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", "VersionTLS12"}
for i := 0; i < len(expectedConfigs); i++ {
count := strings.Count(string(lokiStackConf), expectedConfigs[i])
o.Expect(count).To(o.Equal(8), fmt.Sprintf("Unexpected number of occurrences of %s", expectedConfigs[i]))
}
g.By("Check the LokiStack pods have mounted the Loki config.yaml")
podList, err := oc.AdminKubeClient().CoreV1().Pods(ls.namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "app.kubernetes.io/managed-by=lokistack-controller"})
o.Expect(err).NotTo(o.HaveOccurred())
gatewayPod := ls.name + "-gateway-"
for _, pod := range podList.Items {
if !strings.HasPrefix(pod.Name, gatewayPod) {
output, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args("pod", pod.Name, "-n", ls.namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "/etc/loki/config/config.yaml")).Should(o.BeTrue())
vl := ls.name + "-config"
o.Expect(strings.Contains(output, vl)).Should(o.BeTrue())
}
}
})
g.It("CPaasrunOnly-ConnectedOnly-Author:ikanse-Medium-54525-LokiStack Cluster Logging comply with the old tlsSecurityProfile when configured in the global API server configuration[Slow][Disruptive]", func() {
if isFipsEnabled(oc) {
g.Skip("skip old tlsSecurityProfile on FIPS enabled cluster")
}
var (
jsonLogFile = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
appProj := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Configure the global tlsSecurityProfile to use old profile")
ogTLS, er := oc.AsAdmin().WithoutNamespace().Run("get").Args("apiserver/cluster", "-o", "jsonpath={.spec.tlsSecurityProfile}").Output()
o.Expect(er).NotTo(o.HaveOccurred())
if ogTLS == "" {
ogTLS = "null"
}
ogPatch := fmt.Sprintf(`[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": %s}]`, ogTLS)
defer func() {
oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", ogPatch).Execute()
waitForOperatorsRunning(oc)
}()
patch := `[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": {"old":{},"type":"Old"}}]`
er = oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", patch).Execute()
o.Expect(er).NotTo(o.HaveOccurred())
g.By("Make sure that all the Cluster Operators are in healthy state before progressing.")
waitForOperatorsRunning(oc)
g.By("Deploying LokiStack CR for 1x.demo tshirt size")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "loki-54525",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-54525",
storageClass: sc,
bucketName: "logging-loki-54525-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-54525",
namespace: loggingNS,
serviceAccountName: "logcollector-54525",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-54525",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
g.By("checking app, audit and infra logs in loki")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
for _, logType := range []string{"application", "infrastructure", "audit"} {
lc.waitForLogsAppearByKey(logType, "log_type", logType)
}
lc.waitForLogsAppearByProject("application", appProj)
g.By("Check that the LokiStack gateway is using the Old tlsSecurityProfile")
server := fmt.Sprintf("%s-gateway-http:8081", ls.name)
checkTLSProfile(oc, "old", "RSA", server, "/run/secrets/kubernetes.io/serviceaccount/service-ca.crt", ls.namespace, 2)
g.By("Check the LokiStack config for the Old TLS security profile ciphers and TLS version")
dirname := "/tmp/" + oc.Namespace() + "-lkcnf"
defer os.RemoveAll(dirname)
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("cm/"+ls.name+"-config", "-n", ls.namespace, "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
lokiStackConf, err := os.ReadFile(dirname + "/config.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
expectedConfigs := []string{"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_3DES_EDE_CBC_SHA", "VersionTLS10"}
for i := 0; i < len(expectedConfigs); i++ {
count := strings.Count(string(lokiStackConf), expectedConfigs[i])
o.Expect(count).To(o.Equal(8), fmt.Sprintf("Unexpected number of occurrences of %s", expectedConfigs[i]))
}
g.By("Check the LokiStack pods have mounted the Loki config.yaml")
podList, err := oc.AdminKubeClient().CoreV1().Pods(ls.namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "app.kubernetes.io/managed-by=lokistack-controller"})
o.Expect(err).NotTo(o.HaveOccurred())
gatewayPod := ls.name + "-gateway-"
for _, pod := range podList.Items {
if !strings.HasPrefix(pod.Name, gatewayPod) {
output, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args("pod", pod.Name, "-n", ls.namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "/etc/loki/config/config.yaml")).Should(o.BeTrue())
vl := ls.name + "-config"
o.Expect(strings.Contains(output, vl)).Should(o.BeTrue())
}
}
})
g.It("Author:ikanse-CPaasrunOnly-ConnectedOnly-Medium-54526-Forwarding to lokistack comply with the custom tlsSecurityProfile when configured in the global API server configuration[Slow][Disruptive]", func() {
var (
jsonLogFile = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
appProj := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Configure the global tlsSecurityProfile to use custom profile")
ogTLS, er := oc.AsAdmin().WithoutNamespace().Run("get").Args("apiserver/cluster", "-o", "jsonpath={.spec.tlsSecurityProfile}").Output()
o.Expect(er).NotTo(o.HaveOccurred())
if ogTLS == "" {
ogTLS = "null"
}
ogPatch := fmt.Sprintf(`[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": %s}]`, ogTLS)
defer func() {
oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", ogPatch).Execute()
waitForOperatorsRunning(oc)
}()
patch := `[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": {"custom":{"ciphers":["ECDHE-ECDSA-CHACHA20-POLY1305","ECDHE-RSA-CHACHA20-POLY1305","ECDHE-RSA-AES128-GCM-SHA256","ECDHE-ECDSA-AES128-GCM-SHA256"],"minTLSVersion":"VersionTLS12"},"type":"Custom"}}]`
er = oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", patch).Execute()
o.Expect(er).NotTo(o.HaveOccurred())
g.By("Make sure that all the Cluster Operators are in healthy state before progressing.")
waitForOperatorsRunning(oc)
g.By("Deploying LokiStack CR for 1x.demo tshirt size")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "loki-54526",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-54526",
storageClass: sc,
bucketName: "logging-loki-54526-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-54526",
namespace: loggingNS,
serviceAccountName: "logcollector-54526",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-54526",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
g.By("checking app, audit and infra logs in loki")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
for _, logType := range []string{"application", "infrastructure", "audit"} {
lc.waitForLogsAppearByKey(logType, "log_type", logType)
}
lc.waitForLogsAppearByProject("application", appProj)
g.By("Check that the LokiStack gateway is using the Custom tlsSecurityProfile")
server := fmt.Sprintf("%s-gateway-http:8081", ls.name)
checkTLSProfile(oc, "custom", "RSA", server, "/run/secrets/kubernetes.io/serviceaccount/service-ca.crt", ls.namespace, 2)
g.By("Check the LokiStack config for the Custom TLS security profile ciphers and TLS version")
dirname := "/tmp/" + oc.Namespace() + "-lkcnf"
defer os.RemoveAll(dirname)
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("cm/"+ls.name+"-config", "-n", ls.namespace, "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
lokiStackConf, err := os.ReadFile(dirname + "/config.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
expectedConfigs := []string{"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "VersionTLS12"}
for i := 0; i < len(expectedConfigs); i++ {
count := strings.Count(string(lokiStackConf), expectedConfigs[i])
o.Expect(count).To(o.Equal(8), fmt.Sprintf("Unexpected number of occurrences of %s", expectedConfigs[i]))
}
g.By("Check the LokiStack pods have mounted the Loki config.yaml")
podList, err := oc.AdminKubeClient().CoreV1().Pods(ls.namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "app.kubernetes.io/managed-by=lokistack-controller"})
o.Expect(err).NotTo(o.HaveOccurred())
gatewayPod := ls.name + "-gateway-"
for _, pod := range podList.Items {
if !strings.HasPrefix(pod.Name, gatewayPod) {
output, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args("pod", pod.Name, "-n", ls.namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "/etc/loki/config/config.yaml")).Should(o.BeTrue())
vl := ls.name + "-config"
o.Expect(strings.Contains(output, vl)).Should(o.BeTrue())
}
}
})
g.It("CPaasrunOnly-ConnectedOnly-Author:ikanse-Medium-54527-LokiStack Cluster Logging comply with the global tlsSecurityProfile - old to intermediate[Slow][Disruptive]", func() {
if isFipsEnabled(oc) {
g.Skip("skip old tlsSecurityProfile on FIPS enabled cluster")
}
var (
jsonLogFile = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
appProj := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Configure the global tlsSecurityProfile to use old profile")
ogTLS, er := oc.AsAdmin().WithoutNamespace().Run("get").Args("apiserver/cluster", "-o", "jsonpath={.spec.tlsSecurityProfile}").Output()
o.Expect(er).NotTo(o.HaveOccurred())
if ogTLS == "" {
ogTLS = "null"
}
ogPatch := fmt.Sprintf(`[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": %s}]`, ogTLS)
defer func() {
oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", ogPatch).Execute()
waitForOperatorsRunning(oc)
}()
patch := `[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": {"old":{},"type":"Old"}}]`
er = oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", patch).Execute()
o.Expect(er).NotTo(o.HaveOccurred())
g.By("Make sure that all the Cluster Operators are in healthy state before progressing.")
waitForOperatorsRunning(oc)
g.By("Deploying LokiStack CR for 1x.demo tshirt size")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "loki-54527",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-54527",
storageClass: sc,
bucketName: "logging-loki-54527-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-54527",
namespace: loggingNS,
serviceAccountName: "logcollector-54527",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-54527",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
g.By("checking app, audit and infra logs in loki")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
for _, logType := range []string{"application", "infrastructure", "audit"} {
lc.waitForLogsAppearByKey(logType, "log_type", logType)
}
lc.waitForLogsAppearByProject("application", appProj)
g.By("Check that the LokiStack gateway is using the Old tlsSecurityProfile")
server := fmt.Sprintf("%s-gateway-http:8081", ls.name)
checkTLSProfile(oc, "old", "RSA", server, "/run/secrets/kubernetes.io/serviceaccount/service-ca.crt", ls.namespace, 2)
g.By("Check the LokiStack config for the Old TLS security profile ciphers and TLS version")
dirname := "/tmp/" + oc.Namespace() + "-lkcnf"
defer os.RemoveAll(dirname)
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("cm/"+ls.name+"-config", "-n", ls.namespace, "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
lokiStackConf, err := os.ReadFile(dirname + "/config.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
expectedConfigs := []string{"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_3DES_EDE_CBC_SHA", "VersionTLS10"}
for i := 0; i < len(expectedConfigs); i++ {
count := strings.Count(string(lokiStackConf), expectedConfigs[i])
o.Expect(count).To(o.Equal(8), fmt.Sprintf("Unexpected number of occurrences of %s", expectedConfigs[i]))
}
g.By("Check the LokiStack pods have mounted the Loki config.yaml")
podList, err := oc.AdminKubeClient().CoreV1().Pods(ls.namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "app.kubernetes.io/managed-by=lokistack-controller"})
o.Expect(err).NotTo(o.HaveOccurred())
gatewayPod := ls.name + "-gateway-"
for _, pod := range podList.Items {
if !strings.HasPrefix(pod.Name, gatewayPod) {
output, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args("pod", pod.Name, "-n", ls.namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "/etc/loki/config/config.yaml")).Should(o.BeTrue())
vl := ls.name + "-config"
o.Expect(strings.Contains(output, vl)).Should(o.BeTrue())
}
}
g.By("Configure the global tlsSecurityProfile to use Intermediate profile")
patch = `[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": {"intermediate":{},"type":"Intermediate"}}]`
er = oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", patch).Execute()
o.Expect(er).NotTo(o.HaveOccurred())
e2e.Logf("Sleep for 3 minutes to allow LokiStack to reconcile and use the changed tlsSecurityProfile config.")
time.Sleep(3 * time.Minute)
ls.waitForLokiStackToBeReady(oc)
waitForOperatorsRunning(oc)
g.By("create a new project")
oc.SetupProject()
newAppProj := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-n", newAppProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("checking app, audit and infra logs in loki")
route = "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc = newLokiClient(route).withToken(bearerToken).retry(5)
for _, logType := range []string{"application", "infrastructure", "audit"} {
lc.waitForLogsAppearByKey(logType, "log_type", logType)
}
lc.waitForLogsAppearByProject("application", newAppProj)
g.By("Check that the LokiStack gateway is using the intermediate tlsSecurityProfile")
server = fmt.Sprintf("%s-gateway-http:8081", ls.name)
checkTLSProfile(oc, "intermediate", "RSA", server, "/run/secrets/kubernetes.io/serviceaccount/service-ca.crt", ls.namespace, 2)
g.By("Check the LokiStack config for the intermediate TLS security profile ciphers and TLS version")
os.RemoveAll(dirname)
dirname = "/tmp/" + oc.Namespace() + "-lkcnf"
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("cm/"+ls.name+"-config", "-n", ls.namespace, "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
lokiStackConf, err = os.ReadFile(dirname + "/config.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
expectedConfigs = []string{"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", "VersionTLS12"}
for i := 0; i < len(expectedConfigs); i++ {
count := strings.Count(string(lokiStackConf), expectedConfigs[i])
o.Expect(count).To(o.Equal(8), fmt.Sprintf("Unexpected number of occurrences of %s", expectedConfigs[i]))
}
g.By("Check the LokiStack pods have mounted the Loki config.yaml")
podList, err = oc.AdminKubeClient().CoreV1().Pods(ls.namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "app.kubernetes.io/managed-by=lokistack-controller"})
o.Expect(err).NotTo(o.HaveOccurred())
gatewayPod = ls.name + "-gateway-"
for _, pod := range podList.Items {
if !strings.HasPrefix(pod.Name, gatewayPod) {
output, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args("pod", pod.Name, "-n", ls.namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "/etc/loki/config/config.yaml")).Should(o.BeTrue())
vl := ls.name + "-config"
o.Expect(strings.Contains(output, vl)).Should(o.BeTrue())
}
}
})
})
})
var _ = g.Describe("[sig-openshift-logging] Logging NonPreRelease", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("loki-log-alerts-vector", exutil.KubeConfigPath())
loggingBaseDir, s string
)
g.Context("Loki Log Alerts testing", func() {
g.BeforeEach(func() {
s = getStorageType(oc)
if len(s) == 0 {
g.Skip("Current cluster doesn't have a proper object storage for this test!")
}
if !validateInfraForLoki(oc) {
g.Skip("Current platform not supported!")
}
loggingBaseDir = exutil.FixturePath("testdata", "logging")
subTemplate := filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml")
CLO := SubscriptionObjects{
OperatorName: "cluster-logging-operator",
Namespace: cloNS,
PackageName: "cluster-logging",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
LO := SubscriptionObjects{
OperatorName: "loki-operator-controller-manager",
Namespace: loNS,
PackageName: "loki-operator",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
g.By("deploy CLO and LO")
CLO.SubscribeOperator(oc)
LO.SubscribeOperator(oc)
oc.SetupProject()
})
g.It("CPaasrunOnly-Author:kbharti-High-52779-High-55393-Loki Operator - Validate alert and recording rules in LokiRuler configmap and Rules API(cluster-admin)[Serial]", func() {
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
appProj := oc.Namespace()
oc.AsAdmin().WithoutNamespace().Run("label").Args("namespace", appProj, "openshift.io/cluster-monitoring=true").Execute()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
sc, err := getStorageClassName(oc)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploying LokiStack CR")
ls := lokiStack{
name: "loki-52779",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-52779",
storageClass: sc,
bucketName: "logging-loki-52779-" + getInfrastructureName(oc),
template: filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml"),
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
g.By("Create Loki Alert and recording rules")
appAlertingTemplate := filepath.Join(loggingBaseDir, "loki-log-alerts", "loki-app-alerting-rule-template.yaml")
appAlertRule := resource{"alertingrule", "my-app-workload-alert", appProj}
defer appAlertRule.clear(oc)
err = appAlertRule.applyFromTemplate(oc, "-n", appAlertRule.namespace, "-f", appAlertingTemplate, "-p", "NAMESPACE="+appProj)
o.Expect(err).NotTo(o.HaveOccurred())
appRecordingTemplate := filepath.Join(loggingBaseDir, "loki-log-alerts", "loki-app-recording-rule-template.yaml")
appRecordRule := resource{"recordingrule", "my-app-workload-record", appProj}
defer appRecordRule.clear(oc)
err = appRecordRule.applyFromTemplate(oc, "-n", appRecordRule.namespace, "-f", appRecordingTemplate, "-p", "NAMESPACE="+appProj)
o.Expect(err).NotTo(o.HaveOccurred())
infraAlertingTemplate := filepath.Join(loggingBaseDir, "loki-log-alerts", "loki-infra-alerting-rule-template.yaml")
infraAlertRule := resource{"alertingrule", "my-infra-workload-alert", loNS}
defer infraAlertRule.clear(oc)
err = infraAlertRule.applyFromTemplate(oc, "-n", infraAlertRule.namespace, "-f", infraAlertingTemplate)
o.Expect(err).NotTo(o.HaveOccurred())
infraRecordingTemplate := filepath.Join(loggingBaseDir, "loki-log-alerts", "loki-infra-recording-rule-template.yaml")
infraRecordRule := resource{"recordingrule", "my-infra-workload-record", loNS}
defer infraRecordRule.clear(oc)
err = infraRecordRule.applyFromTemplate(oc, "-n", infraRecordRule.namespace, "-f", infraRecordingTemplate)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-52779",
namespace: loggingNS,
serviceAccountName: "logcollector-52779",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-52779",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
enableMonitoring: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
g.By("Validating loki rules configmap")
expectedRules := []string{appProj + "-my-app-workload-alert", appProj + "-my-app-workload-record", loNS + "-my-infra-workload-alert", loNS + "-my-infra-workload-record"}
rulesCM, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", ls.namespace, ls.name+"-rules-0", "-o=jsonpath={.data}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
for _, expectedRule := range expectedRules {
if !strings.Contains(string(rulesCM), expectedRule) {
g.Fail("Response is missing " + expectedRule)
}
}
e2e.Logf("Data has been validated in the rules configmap")
g.By("Querying rules API for application alerting/recording rules")
// adding cluster-admin role to a sa, but still can't query rules without `kubernetes_namespace_name=<project-name>`
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
appRules, err := lc.queryRules("application", appProj)
o.Expect(err).NotTo(o.HaveOccurred())
matchDataInResponse := []string{"name: MyAppLogVolumeAlert", "alert: MyAppLogVolumeIsHigh", "tenantId: application", "name: HighAppLogsToLoki1m", "record: loki:operator:applogs:rate1m"}
for _, matchedData := range matchDataInResponse {
if !strings.Contains(string(appRules), matchedData) {
g.Fail("Response is missing " + matchedData)
}
}
infraRules, err := lc.queryRules("infrastructure", loNS)
o.Expect(err).NotTo(o.HaveOccurred())
matchDataInResponse = []string{"name: LokiOperatorLogsHigh", "alert: LokiOperatorLogsAreHigh", "tenantId: infrastructure", "name: LokiOperatorLogsAreHigh1m", "record: loki:operator:infralogs:rate1m"}
for _, matchedData := range matchDataInResponse {
if !strings.Contains(string(infraRules), matchedData) {
g.Fail("Response is missing " + matchedData)
}
}
e2e.Logf("Rules API response validated successfully")
})
g.It("CPaasrunOnly-Author:kbharti-Critical-55415-Loki Operator - Validate AlertManager support for cluster-monitoring is decoupled from User-workload monitoring[Serial]", func() {
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
appProj := oc.Namespace()
oc.AsAdmin().WithoutNamespace().Run("label").Args("namespace", appProj, "openshift.io/cluster-monitoring=true").Execute()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
sc, err := getStorageClassName(oc)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploying LokiStack CR")
ls := lokiStack{
name: "loki-55415",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-55415",
storageClass: sc,
bucketName: "logging-loki-55415-" + getInfrastructureName(oc),
template: filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml"),
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-55415",
namespace: loggingNS,
serviceAccountName: "logcollector-55415",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-55415",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
enableMonitoring: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
g.By("Create Loki Alert and recording rules")
alertingTemplate := filepath.Join(loggingBaseDir, "loki-log-alerts", "loki-app-alerting-rule-template.yaml")
alertRule := resource{"alertingrule", "my-app-workload-alert", appProj}
defer alertRule.clear(oc)
err = alertRule.applyFromTemplate(oc, "-n", alertRule.namespace, "-f", alertingTemplate, "-p", "NAMESPACE="+appProj)
o.Expect(err).NotTo(o.HaveOccurred())
recordingTemplate := filepath.Join(loggingBaseDir, "loki-log-alerts", "loki-app-recording-rule-template.yaml")
recordingRule := resource{"recordingrule", "my-app-workload-record", appProj}
defer recordingRule.clear(oc)
err = recordingRule.applyFromTemplate(oc, "-n", recordingRule.namespace, "-f", recordingTemplate, "-p", "NAMESPACE="+appProj)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
g.By("Validate AlertManager support for Cluster-Monitoring under openshift-monitoring")
dirname := "/tmp/" + oc.Namespace() + "-log-alerts"
defer os.RemoveAll(dirname)
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("cm/"+ls.name+"-config", "-n", ls.namespace, "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
files, err := os.ReadDir(dirname)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(files)).To(o.Equal(2)) // the <lokistack-name>-config ConfigMap contains config.yaml and runtime-config.yaml
amURL := "alertmanager_url: https://_web._tcp.alertmanager-operated.openshift-monitoring.svc"
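// without user-workload monitoring, only config.yaml should point at the cluster AlertManager; runtime-config.yaml should not set alertmanager_url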
for _, file := range files {
if file.Name() == "config.yaml" {
lokiStackConf, err := os.ReadFile(dirname + "/config.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(string(lokiStackConf), amURL)).Should(o.BeTrue())
}
if file.Name() == "runtime-config.yaml" {
lokiStackConf, err := os.ReadFile(dirname + "/runtime-config.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(string(lokiStackConf), "alertmanager_url")).ShouldNot(o.BeTrue())
}
}
g.By("Query AlertManager for Firing Alerts")
bearerToken := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
queryAlertManagerForActiveAlerts(oc, bearerToken, false, "MyAppLogVolumeIsHigh", 5)
})
g.It("CPaasrunOnly-Author:kbharti-Medium-61435-Loki Operator - Validate AlertManager support for User-workload monitoring[Serial]", func() {
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
appProj := oc.Namespace()
oc.AsAdmin().WithoutNamespace().Run("label").Args("namespace", appProj, "openshift.io/cluster-monitoring=true").Execute()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
sc, err := getStorageClassName(oc)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploying LokiStack CR")
ls := lokiStack{
name: "loki-61435",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-61435",
storageClass: sc,
bucketName: "logging-loki-61435-" + getInfrastructureName(oc),
template: filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml"),
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-61435",
namespace: loggingNS,
serviceAccountName: "logcollector-61435",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-61435",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
enableMonitoring: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
g.By("Enable User Workload Monitoring")
enableUserWorkloadMonitoringForLogging(oc)
defer deleteUserWorkloadManifests(oc)
g.By("Create Loki Alert and recording rules")
alertingTemplate := filepath.Join(loggingBaseDir, "loki-log-alerts", "loki-app-alerting-rule-template.yaml")
alertRule := resource{"alertingrule", "my-app-workload-alert", appProj}
defer alertRule.clear(oc)
err = alertRule.applyFromTemplate(oc, "-n", alertRule.namespace, "-f", alertingTemplate, "-p", "NAMESPACE="+appProj)
o.Expect(err).NotTo(o.HaveOccurred())
recordingTemplate := filepath.Join(loggingBaseDir, "loki-log-alerts", "loki-app-recording-rule-template.yaml")
recordingRule := resource{"recordingrule", "my-app-workload-record", appProj}
defer recordingRule.clear(oc)
err = recordingRule.applyFromTemplate(oc, "-n", recordingRule.namespace, "-f", recordingTemplate, "-p", "NAMESPACE="+appProj)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
g.By("Validate AlertManager configuration for cluster monitoring and user-workload monitoring")
dirname := "/tmp/" + oc.Namespace() + "-log-alerts"
defer os.RemoveAll(dirname)
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("cm/"+ls.name+"-config", "-n", ls.namespace, "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
files, err := os.ReadDir(dirname)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(files)).To(o.Equal(2)) // the <lokistack-name>-config ConfigMap contains config.yaml and runtime-config.yaml
amURL := "alertmanager_url: https://_web._tcp.alertmanager-operated.openshift-monitoring.svc"
userWorkloadAMURL := "alertmanager_url: https://_web._tcp.alertmanager-operated.openshift-user-workload-monitoring.svc"
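// with user-workload monitoring enabled, config.yaml keeps the cluster AlertManager URL while runtime-config.yaml should point at the user-workload AlertManager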
for _, file := range files {
if file.Name() == "config.yaml" {
lokiStackConf, err := os.ReadFile(dirname + "/config.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(string(lokiStackConf), amURL)).Should(o.BeTrue())
}
if file.Name() == "runtime-config.yaml" {
lokiStackConf, err := os.ReadFile(dirname + "/runtime-config.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(string(lokiStackConf), userWorkloadAMURL)).Should(o.BeTrue())
}
}
g.By("Query User workload AlertManager for Firing Alerts")
defer removeClusterRoleFromServiceAccount(oc, appProj, "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, appProj, "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", appProj)
//token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
queryAlertManagerForActiveAlerts(oc, bearerToken, true, "MyAppLogVolumeIsHigh", 5)
})
})
})
var _ = g.Describe("[sig-openshift-logging] Logging NonPreRelease Flow control testing", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("logging-flow-control", exutil.KubeConfigPath())
loggingBaseDir, s, sc, jsonLogFile string
)
g.BeforeEach(func() {
s = getStorageType(oc)
if len(s) == 0 {
g.Skip("Current cluster doesn't have a proper object storage for this test!")
}
sc, _ = getStorageClassName(oc)
if len(sc) == 0 {
g.Skip("The cluster doesn't have a proper storage class for this test!")
}
loggingBaseDir = exutil.FixturePath("testdata", "logging")
jsonLogFile = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
subTemplate := filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml")
CLO := SubscriptionObjects{
OperatorName: "cluster-logging-operator",
Namespace: cloNS,
PackageName: "cluster-logging",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
LO := SubscriptionObjects{
OperatorName: "loki-operator-controller-manager",
Namespace: loNS,
PackageName: "loki-operator",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
exutil.By("deploy CLO and LO")
CLO.SubscribeOperator(oc)
LO.SubscribeOperator(oc)
oc.SetupProject()
})
g.It("Author:qitang-CPaasrunOnly-Medium-76114-Controlling log flow rates per container from selected containers by containerLimit.[Serial][Slow]", func() {
if !validateInfraForLoki(oc) {
g.Skip("Current platform not supported!")
}
exutil.By("Create 3 pods in one project")
multiplePods := oc.Namespace()
for i := 0; i < 3; i++ {
err := oc.WithoutNamespace().Run("new-app").Args("-n", multiplePods, "-f", jsonLogFile, "-p", "RATE=3000", "-p", "CONFIGMAP=logtest-config-"+strconv.Itoa(i), "-p", "REPLICATIONCONTROLLER=logging-centos-logtest-"+strconv.Itoa(i)).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("Create 3 projects and create one pod in each project")
namespaces := []string{}
for i := 0; i < 3; i++ {
nsName := "logging-flow-control-" + getRandomString()
namespaces = append(namespaces, nsName)
oc.CreateSpecifiedNamespaceAsAdmin(nsName)
defer oc.DeleteSpecifiedNamespaceAsAdmin(nsName)
err := oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-n", nsName, "-f", jsonLogFile, "-p", "RATE=3000", "-p", "LABELS={\"logging-flow-control\": \"centos-logtest\"}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("Create a pod with 3 containers")
multiContainer := filepath.Join(loggingBaseDir, "generatelog", "multi_container_json_log_template.yaml")
oc.SetupProject()
multipleContainers := oc.Namespace()
err := oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-n", multipleContainers, "-f", multiContainer, "-p", "RATE=3000", "-p", "LABELS={\"multiple-containers\": \"centos-logtest\"}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("deploy loki stack")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "lokistack-76114",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-76114",
storageClass: sc,
bucketName: "logging-loki-76114-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-76114",
namespace: loggingNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
serviceAccountName: "logcollector-76114",
secretName: "lokistack-secret-76114",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace, "INPUT_REFS=[\"application\"]")
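// add three application inputs with per-container rate limits (10, 20 and 30 records/s), scoped by namespace and pod labels, and point the pipeline at them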
patch := fmt.Sprintf(`[{"op": "add", "path": "/spec/inputs", "value": [{"application": {"includes": [{"namespace": %s}], "tuning": {"rateLimitPerContainer": {"maxRecordsPerSecond": 10}}}, "name": "limited-rates-1", "type": "application"}, {"application": {"selector": {"matchLabels": {"logging-flow-control": "centos-logtest"}}, "tuning": {"rateLimitPerContainer": {"maxRecordsPerSecond": 20}}}, "name": "limited-rates-2", "type": "application"}, {"application": {"selector": {"matchLabels": {"multiple-containers": "centos-logtest"}}, "tuning": {"rateLimitPerContainer": {"maxRecordsPerSecond": 30}}}, "name": "limited-rates-3", "type": "application"}]},{ "op": "replace", "path": "/spec/pipelines/0/inputRefs", "value": ["limited-rates-1","limited-rates-2","limited-rates-3"]}]`, multiplePods)
clf.update(oc, "", patch, "--type=json")
clf.waitForCollectorPodsReady(oc)
// sleep 3 minutes for logs to be collected
time.Sleep(3 * time.Minute)
exutil.By("Check data in lokistack")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
//ensure logs from each project are collected
for _, ns := range namespaces {
lc.waitForLogsAppearByProject("application", ns)
}
lc.waitForLogsAppearByProject("application", multipleContainers)
lc.waitForLogsAppearByProject("application", multiplePods)
exutil.By("for logs in project/" + multiplePods + ", the count of each container in one minute should be ~10*60")
re, _ := lc.query("application", "sum by(kubernetes_pod_name)(count_over_time({kubernetes_namespace_name=\""+multiplePods+"\"}[1m]))", 30, false, time.Now())
o.Expect(len(re.Data.Result) > 0).Should(o.BeTrue())
for _, r := range re.Data.Result {
// check the penultimate value
v := r.Values[len(r.Values)-2]
c := convertInterfaceToArray(v)[1]
count, err := strconv.Atoi(c)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(count <= 650).To(o.BeTrue(), fmt.Sprintf("the count is %d, but the expected value is ~600", count))
}
exutil.By("for logs in projects logging-flow-control-*, the count of each container in one minute should be ~20*60")
// get `400 Bad Request` when querying with `sum by(kubernetes_pod_name)(count_over_time({kubernetes_namespace_name=~\"logging-flow-control-.+\"}[1m]))`
for _, ns := range namespaces {
res, _ := lc.query("application", "sum by(kubernetes_pod_name)(count_over_time({kubernetes_namespace_name=\""+ns+"\"}[1m]))", 30, false, time.Now())
o.Expect(len(res.Data.Result) > 0).Should(o.BeTrue())
for _, r := range res.Data.Result {
// check the penultimate value
v := r.Values[len(r.Values)-2]
c := convertInterfaceToArray(v)[1]
count, _ := strconv.Atoi(c)
o.Expect(count <= 1300).To(o.BeTrue(), fmt.Sprintf("the count is %d, but the expected value is ~1200", count))
}
}
exutil.By("for logs in project/" + multipleContainers + ", the count of each container in one minute should be ~30*60")
res, _ := lc.query("application", "sum by(kubernetes_container_name)(count_over_time({kubernetes_namespace_name=\""+multipleContainers+"\"}[1m]))", 30, false, time.Now())
o.Expect(len(res.Data.Result) > 0).Should(o.BeTrue())
for _, r := range res.Data.Result {
// check the penultimate value
v := r.Values[len(r.Values)-2]
c := convertInterfaceToArray(v)[1]
count, _ := strconv.Atoi(c)
o.Expect(count <= 1950).To(o.BeTrue(), fmt.Sprintf("the count is %d, but the expected value is ~1800", count))
}
})
g.It("Author:qitang-CPaasrunOnly-Medium-76115-Controlling the flow rate per destination to selected outputs.[Serial][Slow]", func() {
if !validateInfraForLoki(oc) {
g.Skip("Current platform not supported!")
}
exutil.By("Create pod to generate some logs")
appProj := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile, "-p", "RATE=3000", "-p", "REPLICAS=3").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
podNodeName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", appProj, "-ojsonpath={.items[*].spec.nodeName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeNames := strings.Split(podNodeName, " ")
exutil.By("deploy loki stack")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "lokistack-76115",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-76115",
storageClass: sc,
bucketName: "logging-loki-76115-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("Deploy non-logging managed log stores")
oc.SetupProject()
loki := externalLoki{
name: "loki-server",
namespace: oc.Namespace(),
}
defer loki.remove(oc)
loki.deployLoki(oc)
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-76115",
namespace: loggingNS,
serviceAccountName: "logcollector-76115",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-76115",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
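// add a second Loki output (the user-managed loki-server) with a per-output rate limit of 10 records/s and append it to the pipeline's outputRefs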
patch := `[{"op": "add", "path": "/spec/outputs/-", "value": {"name":"loki-server","type":"loki", "loki": {"url":"http://` + loki.name + `.` + loki.namespace + `.svc:3100"}, "rateLimit": {"maxRecordsPerSecond": 10}}}, {"op": "add", "path": "/spec/pipelines/0/outputRefs/-", "value": "loki-server"}]`
clf.update(oc, "", patch, "--type=json")
clf.waitForCollectorPodsReady(oc)
// sleep 3 minutes for logs to be collected
time.Sleep(3 * time.Minute)
exutil.By("check data in user-managed loki, the count of logs from each node in one minute should be ~10*60")
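// the user-managed Loki is queried directly over its plain HTTP route, no bearer token required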
route := "http://" + getRouteAddress(oc, loki.namespace, loki.name)
lc := newLokiClient(route)
lc.waitForLogsAppearByProject("", appProj)
res, _ := lc.query("", "sum by(kubernetes_host)(count_over_time({log_type=~\".+\"}[1m]))", 30, false, time.Now())
o.Expect(len(res.Data.Result) > 0).Should(o.BeTrue())
for _, r := range res.Data.Result {
// check the penultimate value
v := r.Values[len(r.Values)-2]
c := convertInterfaceToArray(v)[1]
count, _ := strconv.Atoi(c)
o.Expect(count <= 650).To(o.BeTrue(), fmt.Sprintf("the count is %d, but the expected value is ~600", count))
}
exutil.By("check data in lokistack, there should not have rate limitation")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
routeLokiStack := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lokistackClient := newLokiClient(routeLokiStack).withToken(bearerToken).retry(5)
for _, nodeName := range nodeNames {
// only check app logs; for infra and audit logs we don't know how many logs OCP generates in one minute
res, _ := lokistackClient.query("application", "sum by(kubernetes_host)(count_over_time({kubernetes_host=\""+nodeName+"\"}[1m]))", 30, false, time.Now())
o.Expect(len(res.Data.Result) > 0).Should(o.BeTrue())
for _, r := range res.Data.Result {
v := r.Values[len(r.Values)-2]
c := convertInterfaceToArray(v)[1]
count, _ := strconv.Atoi(c)
o.Expect(count >= 2900).Should(o.BeTrue())
}
}
})
g.It("Author:qitang-CPaasrunOnly-Medium-65195-Controlling log flow rates - different output with different rate", func() {
exutil.By("Create pod to generate some logs")
appProj := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile, "-p", "RATE=3000").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Deploy non-logging managed log stores")
oc.SetupProject()
logStoresNS := oc.Namespace()
loki := externalLoki{
name: "loki-server",
namespace: logStoresNS,
}
defer loki.remove(oc)
loki.deployLoki(oc)
es := externalES{
namespace: logStoresNS,
version: "8",
serverName: "elasticsearch-8",
loggingNS: logStoresNS,
}
defer es.remove(oc)
es.deploy(oc)
rsyslog := rsyslog{
serverName: "rsyslog",
namespace: logStoresNS,
tls: false,
loggingNS: logStoresNS,
}
defer rsyslog.remove(oc)
rsyslog.deploy(oc)
exutil.By("Create ClusterLogForwarder")
clf := clusterlogforwarder{
name: "clf-65195",
namespace: logStoresNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "loki.yaml"),
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "URL=http://"+loki.name+"."+loki.namespace+".svc:3100")
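// reconfigure the outputs with individual rate limits: loki at 20 records/s, syslog at 30 records/s and elasticsearch at 10 records/s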
patch := fmt.Sprintf(`{"spec": {"outputs": [{"name":"loki-server","type":"loki","loki":{"url":"http://%s.%s.svc:3100"},"rateLimit": {"maxRecordsPerSecond": 20}}, {"name":"rsyslog-server","type":"syslog","syslog":{"url":"udp://%s.%s.svc:514","rfc":"RFC5424"},"rateLimit": {"maxRecordsPerSecond": 30}}, {"name":"elasticsearch-server","type":"elasticsearch","rateLimit":{"maxRecordsPerSecond": 10},"elasticsearch":{"version":8,"url":"http://%s.%s.svc:9200","index":"{.log_type||\"none-typed-logs\"}"}}]}}`, loki.name, loki.namespace, rsyslog.serverName, rsyslog.namespace, es.serverName, es.namespace)
clf.update(oc, "", patch, "--type=merge")
outputRefs := `[{"op": "replace", "path": "/spec/pipelines/0/outputRefs", "value": ["loki-server", "rsyslog-server", "elasticsearch-server"]}]`
clf.update(oc, "", outputRefs, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
exutil.By("check collector pods' configuration")
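// each output rateLimit should be rendered in vector.toml as a throttle transform whose threshold equals maxRecordsPerSecond over a 1-second window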
lokiConfig := `[transforms.output_loki_server_throttle]
type = "throttle"
inputs = ["pipeline_forward_to_loki_viaqdedot_2"]
window_secs = 1
threshold = 20`
rsyslogConfig := `[transforms.output_rsyslog_server_throttle]
type = "throttle"
inputs = ["pipeline_forward_to_loki_viaqdedot_2"]
window_secs = 1
threshold = 30`
esConfig := `[transforms.output_elasticsearch_server_throttle]
type = "throttle"
inputs = ["pipeline_forward_to_loki_viaqdedot_2"]
window_secs = 1
threshold = 10`
result, err := checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", lokiConfig, rsyslogConfig, esConfig)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue(), "some of the configuration is not in vector.toml")
// sleep 3 minutes for logs to be collected
time.Sleep(3 * time.Minute)
exutil.By("check data in loki, the count of logs from each node in one minute should be ~20*60")
route := "http://" + getRouteAddress(oc, loki.namespace, loki.name)
lc := newLokiClient(route)
res, _ := lc.query("", "sum by(kubernetes_host)(count_over_time({log_type=~\".+\"}[1m]))", 30, false, time.Now())
o.Expect(len(res.Data.Result) > 0).Should(o.BeTrue())
for _, r := range res.Data.Result {
// check the penultimate value
v := r.Values[len(r.Values)-2]
c := convertInterfaceToArray(v)[1]
count, _ := strconv.Atoi(c)
o.Expect(count <= 1300).To(o.BeTrue(), fmt.Sprintf("the count is %d, but the expected value is ~1200", count))
}
//TODO: find a way to check the doc count in rsyslog and es8
/*
exutil.By("check data in ES, the count of logs from each node in one minute should be ~10*60")
for _, node := range nodeNames {
query := `{"query": {"bool": {"must": [{"match_phrase": {"hostname.keyword": "` + node + `"}}, {"range": {"@timestamp": {"gte": "now-1m/m", "lte": "now/m"}}}]}}}`
count, err := es.getDocCount(oc, "", query)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(count <= 700).Should(o.BeTrue(), fmt.Sprintf("The increased count in %s in 1 minute is: %d", node, count))
}
*/
})
})
var _ = g.Describe("[sig-openshift-logging] Logging NonPreRelease Audit Policy Testing", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("logging-audit-policy", exutil.KubeConfigPath())
loggingBaseDir, s, sc string
)
g.BeforeEach(func() {
s = getStorageType(oc)
if len(s) == 0 {
g.Skip("Current cluster doesn't have a proper object storage for this test!")
}
sc, _ = getStorageClassName(oc)
if len(sc) == 0 {
g.Skip("The cluster doesn't have a storage class for this test!")
}
if !validateInfraForLoki(oc) {
g.Skip("Current platform not supported!")
}
loggingBaseDir = exutil.FixturePath("testdata", "logging")
subTemplate := filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml")
CLO := SubscriptionObjects{
OperatorName: "cluster-logging-operator",
Namespace: cloNS,
PackageName: "cluster-logging",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
LO := SubscriptionObjects{
OperatorName: "loki-operator-controller-manager",
Namespace: loNS,
PackageName: "loki-operator",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
exutil.By("deploy CLO and LO")
CLO.SubscribeOperator(oc)
LO.SubscribeOperator(oc)
oc.SetupProject()
})
g.It("Author:qitang-CPaasrunOnly-Critical-75841-Filter audit logs and forward to log store.[Serial]", func() {
exutil.By("Deploying LokiStack")
ls := lokiStack{
name: "loki-75841",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-75841",
storageClass: sc,
bucketName: "logging-loki-75841-" + getInfrastructureName(oc),
template: filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml"),
}
defer ls.removeObjectStorage(oc)
err := ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-75841",
namespace: loggingNS,
serviceAccountName: "logcollector-75841",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "audit-policy.yaml"),
secretName: "lokistack-secret-75841",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
enableMonitoring: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
exutil.By("wait for audit logs to be collected")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
// sleep 3 minutes for logs to be collected
time.Sleep(3 * time.Minute)
lc.waitForLogsAppearByKey("audit", "log_type", "audit")
exutil.By("check if the audit policy is applied to audit logs or not")
// the audit policy drops events with responseStatus.code 404, 409, 422 or 429
e2e.Logf("should not find logs with responseStatus.code: 404/409/422/429")
for _, code := range []string{"404", "409", "422", "429"} {
log, err := lc.searchLogsInLoki("audit", "{log_type=\"audit\" } | json | responseStatus_code=\""+code+"\"")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue(), "Find audit logs with responseStatus_code="+code)
}
e2e.Logf("logs with stage=\"RequestReceived\" should not be collected")
log, err := lc.searchLogsInLoki("audit", "{log_type=\"audit\" } | json | stage=\"RequestReceived\"")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
e2e.Logf("log pod changes at RequestResponse level")
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_resource="pods", level="RequestResponse", objectRef_subresource!~".+"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) > 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_resource="pods", level!="RequestResponse", objectRef_subresource!~".+", objectRef_apiGroup!~".+"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
e2e.Logf(`Log "pods/log", "pods/status" at Request level`)
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_resource="pods", level="Request", objectRef_subresource="status"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) > 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_resource="pods", level!="Request", objectRef_subresource="status"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_resource="pods", level="Request", objectRef_subresource="binding"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) > 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_resource="pods", level!="Request", objectRef_subresource="binding"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-config-managed", "cm/merged-trusted-image-registry-ca").Execute()
e2e.Logf(`Don't log requests to a configmap called "merged-trusted-image-registry-ca"`)
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_resource="configmaps", objectRef_name="merged-trusted-image-registry-ca"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
e2e.Logf(`Log the request body of configmap changes in "openshift-multus"`)
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | level="Request", objectRef_resource="configmaps", objectRef_namespace="openshift-multus"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) > 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | level!="Request", objectRef_resource="configmaps", objectRef_namespace="openshift-multus"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
e2e.Logf(`Log configmap and secret changes in all other namespaces at the RequestResponse level.`)
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | level="RequestResponse", objectRef_resource="configmaps", objectRef_namespace!="openshift-multus"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) > 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | level!="RequestResponse", objectRef_resource="configmaps", objectRef_namespace!="openshift-multus"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | level="RequestResponse", objectRef_resource="secrets"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) > 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | level!="RequestResponse", objectRef_resource="secrets"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
e2e.Logf(`Don't log watch requests by the "system:serviceaccount:openshift-monitoring:prometheus-k8s" on endpoints, services or pods`)
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | verb="watch", user_username="system:serviceaccount:openshift-monitoring:prometheus-k8s", objectRef_resource="endpoints"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | verb="watch", user_username="system:serviceaccount:openshift-monitoring:prometheus-k8s", objectRef_resource="services"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
//log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | verb="watch", user_username="system:serviceaccount:openshift-monitoring:prometheus-k8s", objectRef_resource="pods"`)
//o.Expect(err).NotTo(o.HaveOccurred())
//o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
e2e.Logf(`Don't log authenticated requests to certain non-resource URL paths.`)
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | requestURI="/metrics"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
e2e.Logf(`Log all other resources in core, operators.coreos.com and rbac.authorization.k8s.io at the Request level.`)
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_apiGroup="operators.coreos.com", level="Request"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) > 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_apiGroup="operators.coreos.com", level!="Request"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_apiGroup="rbac.authorization.k8s.io", level="Request"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) > 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_apiGroup="rbac.authorization.k8s.io", level!="Request"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_apiGroup="", level="Request", objectRef_resource!="secrets", objectRef_resource!="configmaps", objectRef_resource!="pods", stage=~".+"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) > 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_apiGroup="", level!="Request", objectRef_resource!="secrets", objectRef_resource!="configmaps", objectRef_resource!="pods", stage=~".+"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
e2e.Logf(`A catch-all rule to log all other requests at the Metadata level.`)
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | level="Metadata"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) > 0).Should(o.BeTrue())
})
g.It("CPaasrunOnly-Author:qitang-High-67421-Separate policies can be applied on separate pipelines.[Serial]", func() {
exutil.By("Deploying an external log store")
es := externalES{
namespace: oc.Namespace(),
loggingNS: loggingNS,
version: "8",
serverName: "external-es",
httpSSL: false,
}
defer es.remove(oc)
es.deploy(oc)
exutil.By("Deploying LokiStack")
ls := lokiStack{
name: "loki-67421",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-67421",
storageClass: sc,
bucketName: "logging-loki-67421-" + getInfrastructureName(oc),
template: filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml"),
}
defer ls.removeObjectStorage(oc)
err := ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-67421",
namespace: loggingNS,
serviceAccountName: "logcollector-67421",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "67421.yaml"),
secretName: "lokistack-secret-67421",
collectAuditLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace, "ES_VERSION="+es.version, "ES_URL=http://"+es.serverName+"."+es.namespace+".svc:9200")
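// assumption: 67421.yaml defines two pipelines with different audit filters, one forwarding to the external Elasticsearch and one to the LokiStack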
// sleep 3 minutes for logs to be collected
time.Sleep(3 * time.Minute)
es.waitForIndexAppear(oc, "audit")
exutil.By("check data in logs stores")
count, err := es.getDocCount(oc, "audit", `{"query": {"term": {"stage": "RequestReceived"}}}`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(count == 0).Should(o.BeTrue())
count, err = es.getDocCount(oc, "audit", `{"query": {"bool": {"must": [{"term": {"objectRef.resource": "pods"}},{"match": {"level": "RequestResponse"}}]}}}`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(count > 0).Should(o.BeTrue())
count, err = es.getDocCount(oc, "audit", `{"query": {"bool": {"must": [{"term": {"objectRef.resource": "pods"}}, {"terms": {"objectRef.subresource": ["status", "binding"]}}, {"match": {"level": "Request"}}]}}}`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(count == 0).Should(o.BeTrue())
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
lc.waitForLogsAppearByKey("audit", "log_type", "audit")
log, err := lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_resource="pods", level="Request", objectRef_subresource!~".+"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_resource="pods", level!="Request", objectRef_subresource!~".+"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) > 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_resource="pods", level="Request", objectRef_subresource="status"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) > 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_resource="pods", level!="Request", objectRef_subresource="status"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_resource="pods", level="Request", objectRef_subresource="binding"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) > 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_resource="pods", level!="Request", objectRef_subresource="binding"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
})
g.It("CPaasrunOnly-Author:qitang-Medium-68318-Multiple policies can be applied to one pipeline.[Serial]", func() {
exutil.By("Deploying LokiStack")
ls := lokiStack{
name: "loki-68318",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-68318",
storageClass: sc,
bucketName: "logging-loki-68318-" + getInfrastructureName(oc),
template: filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml"),
}
defer ls.removeObjectStorage(oc)
err := ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-68318",
namespace: loggingNS,
serviceAccountName: "logcollector-68318",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "68318.yaml"),
secretName: "lokistack-secret-68318",
collectAuditLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
// sleep 3 minutes for logs to be collected
time.Sleep(3 * time.Minute)
exutil.By("generate some audit logs")
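// reading the operator pod's logs issues pods/log API requests, which generates fresh audit events for the checks below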
pod, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", cloNS, "-l", "name=cluster-logging-operator", "-ojsonpath={.items[0].metadata.name}").Output()
oc.AsAdmin().NotShowInfo().WithoutNamespace().Run("logs").Args("-n", cloNS, pod).Execute()
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
lc.waitForLogsAppearByKey("audit", "log_type", "audit")
e2e.Logf("logs with stage=\"RequestReceived\" should not be collected")
log, err := lc.searchLogsInLoki("audit", "{log_type=\"audit\" } | json | stage=\"RequestReceived\"")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
e2e.Logf("log pod changes at Request level")
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_resource="pods", level="Request", objectRef_subresource!~".+"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) > 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_resource="pods", level!="Request", objectRef_subresource!~".+"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
e2e.Logf(`Log secret changes in all namespaces at the Request level.`)
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | level="Request", objectRef_resource="secrets"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) > 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | level!="Request", objectRef_resource="secrets"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
exutil.By("Update the order of filters in filterRefs")
clf.update(oc, "", `[{"op": "replace", "path": "/spec/pipelines/0/filterRefs", "value": ["my-policy-1", "my-policy-0"]}]`, "--type=json")
clf.waitForCollectorPodsReady(oc)
// sleep 3 minutes for logs to be collected
time.Sleep(3 * time.Minute)
e2e.Logf("log pod changes at RequestResponse level")
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_resource="pods", level="RequestResponse", objectRef_subresource!~".+"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) > 0).Should(o.BeTrue())
})
})
var _ = g.Describe("[sig-openshift-logging] Logging NonPreRelease Loki Fine grained logs access testing", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("loki-logs-access", exutil.KubeConfigPath())
loggingBaseDir, s, sc string
)
g.BeforeEach(func() {
s = getStorageType(oc)
if len(s) == 0 {
g.Skip("Current cluster doesn't have a proper object storage for this test!")
}
sc, _ = getStorageClassName(oc)
if len(sc) == 0 {
g.Skip("The cluster doesn't have a storage class for this test!")
}
if !validateInfraForLoki(oc) {
g.Skip("Current platform not supported!")
}
loggingBaseDir = exutil.FixturePath("testdata", "logging")
subTemplate := filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml")
CLO := SubscriptionObjects{
OperatorName: "cluster-logging-operator",
Namespace: cloNS,
PackageName: "cluster-logging",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
LO := SubscriptionObjects{
OperatorName: "loki-operator-controller-manager",
Namespace: loNS,
PackageName: "loki-operator",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
g.By("deploy CLO and LO")
CLO.SubscribeOperator(oc)
LO.SubscribeOperator(oc)
})
g.It("CPaasrunOnly-Author:kbharti-Critical-67565-High-55388-Verify that non-admin/regular user can access logs and query rules as per rolebindings assigned to the user[Serial][Slow]", func() {
var (
loglabeltemplate = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
exutil.By("deploy loki stack")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "lokistack-67565",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-67565",
storageClass: sc,
bucketName: "logging-loki-67565-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err := ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-67565",
namespace: loggingNS,
serviceAccountName: "logcollector-67565",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-67565",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
enableMonitoring: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
// Creating cluster roles to allow read access from LokiStack
defer deleteLokiClusterRolesForReadAccess(oc)
createLokiClusterRolesForReadAccess(oc)
g.By("Create app project with non-admin/regular user")
oc.SetupProject()
userName := oc.Username()
appProj := oc.Namespace()
bearerToken, err := oc.Run("whoami").Args("-t").Output()
o.Expect(err).NotTo(o.HaveOccurred())
oc.AsAdmin().WithoutNamespace().Run("label").Args("namespace", appProj, "openshift.io/cluster-monitoring=true").Execute()
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create Loki Alerting rule")
appAlertingTemplate := filepath.Join(loggingBaseDir, "loki-log-alerts", "loki-app-alerting-rule-template.yaml")
params := []string{"-f", appAlertingTemplate, "-p", "NAMESPACE=" + appProj}
err = oc.Run("create").Args("-f", exutil.ProcessTemplate(oc, params...), "-n", appProj).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
g.By("Validate that user cannot access logs and rules of owned namespace without RBAC - 403 Auth exception")
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
_, err = lc.searchByNamespace("application", appProj)
o.Expect(err).To(o.HaveOccurred())
_, err = lc.queryRules("application", appProj)
o.Expect(err).To(o.HaveOccurred())
g.By("Create Role-binding to access logs and rules of owned project")
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-role-to-user", "cluster-logging-application-view", userName, "-n", appProj).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Validate user can access logs and rules of owned namespace after RBAC is created - Success flow")
lc.waitForLogsAppearByProject("application", appProj)
appRules, err := lc.queryRules("application", appProj)
o.Expect(err).NotTo(o.HaveOccurred())
matchDataInResponse := []string{"name: MyAppLogVolumeAlert", "alert: MyAppLogVolumeIsHigh", "tenantId: application"}
for _, matchedData := range matchDataInResponse {
if !strings.Contains(string(appRules), matchedData) {
e2e.Failf("Response is missing %s", matchedData)
}
}
e2e.Logf("Rules API response validated successfully")
})
g.It("CPaasrunOnly-Author:kbharti-Critical-67643-Verify logs access for LokiStack adminGroups[Serial][Slow]", func() {
g.By("Create Groups with users")
oc.SetupProject()
user1 := oc.Username()
user1Token, err := oc.Run("whoami").Args("-t").Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("groups", "new", "infra-admin-group-67643").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().Run("delete").Args("group", "infra-admin-group-67643").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("groups", "add-users", "infra-admin-group-67643", user1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
user2 := oc.Username()
user2Token, err := oc.Run("whoami").Args("-t").Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("groups", "new", "audit-admin-group-67643").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().Run("delete").Args("group", "audit-admin-group-67643").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("groups", "add-users", "audit-admin-group-67643", user2).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploying LokiStack with adminGroups")
exutil.By("deploy loki stack")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "lokistack-67643",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-67643",
storageClass: sc,
bucketName: "logging-loki-67643-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc, "-p", "ADMIN_GROUPS=[\"audit-admin-group-67643\",\"infra-admin-group-67643\"]")
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-67643",
namespace: loggingNS,
serviceAccountName: "logcollector-67643",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-67643",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
// Creating cluster roles to allow read access from LokiStack
defer deleteLokiClusterRolesForReadAccess(oc)
createLokiClusterRolesForReadAccess(oc)
g.By("Create RBAC for groups to access infra/audit logs")
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-group", "cluster-logging-infrastructure-view", "infra-admin-group-67643").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-group", "cluster-logging-infrastructure-view", "infra-admin-group-67643").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-group", "cluster-logging-audit-view", "audit-admin-group-67643").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-group", "cluster-logging-audit-view", "audit-admin-group-67643").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Check Logs Access with users from AdminGroups")
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(user1Token).retry(5)
lc.waitForLogsAppearByKey("infrastructure", "log_type", "infrastructure")
lc = newLokiClient(route).withToken(user2Token).retry(5)
lc.waitForLogsAppearByKey("audit", "log_type", "audit")
})
})
var _ = g.Describe("[sig-openshift-logging] Logging NonPreRelease Loki - Efficient OTEL Support", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("loki-otel-support", exutil.KubeConfigPath())
loggingBaseDir, s, sc string
)
g.BeforeEach(func() {
s = getStorageType(oc)
if len(s) == 0 {
g.Skip("Current cluster doesn't have a proper object storage for this test!")
}
sc, _ = getStorageClassName(oc)
if len(sc) == 0 {
g.Skip("The cluster doesn't have a storage class for this test!")
}
if !validateInfraForLoki(oc) {
g.Skip("Current platform not supported!")
}
loggingBaseDir = exutil.FixturePath("testdata", "logging")
subTemplate := filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml")
CLO := SubscriptionObjects{
OperatorName: "cluster-logging-operator",
Namespace: cloNS,
PackageName: "cluster-logging",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
LO := SubscriptionObjects{
OperatorName: "loki-operator-controller-manager",
Namespace: loNS,
PackageName: "loki-operator",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
g.By("deploy CLO and LO")
CLO.SubscribeOperator(oc)
LO.SubscribeOperator(oc)
})
g.It("CPaasrunOnly-Author:kbharti-High-70683-Medium-70684-Validate new Loki installations support TSDBv3 and v13 storage schema and automatic stream sharding[Serial]", func() {
g.By("Deploy Loki stack with v13 schema and tsdb store")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "lokistack-70683",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-70683",
storageClass: sc,
bucketName: "logging-loki-70683-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err := ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-70683",
namespace: loggingNS,
serviceAccountName: "logcollector-70683",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-70683",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
g.By("Extracting Loki config ...")
dirname := "/tmp/" + oc.Namespace() + "-loki-otel-support"
defer os.RemoveAll(dirname)
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("cm/"+ls.name+"-config", "-n", ls.namespace, "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Validate Loki is using v13 schema in config")
lokiStackConf, err := os.ReadFile(dirname + "/config.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
storageSchemaConfig := StorageSchemaConfig{}
err = yaml.Unmarshal(lokiStackConf, &storageSchemaConfig)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(storageSchemaConfig.SchemaConfig.Configs[0].Schema).Should(o.Equal("v13"))
o.Expect(storageSchemaConfig.SchemaConfig.Configs[0].Store).Should(o.Equal("tsdb"))
g.By("Validate Automatic stream sharding")
lokiLimitsConfig := LokiLimitsConfig{}
err = yaml.Unmarshal(lokiStackConf, &lokiLimitsConfig)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(lokiLimitsConfig.LimitsConfig.ShardStreams.Enabled).Should(o.Equal(true))
o.Expect(lokiLimitsConfig.LimitsConfig.ShardStreams.DesiredRate).Should(o.Equal("3MB"))
o.Expect(lokiLimitsConfig.LimitsConfig.AllowStructuredMetadata).Should(o.Equal(true))
g.By("Check exposed metrics for Loki Stream Sharding")
defer removeClusterRoleFromServiceAccount(oc, cloNS, "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, cloNS, "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", cloNS)
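// With automatic stream sharding enabled, Loki should expose the rate-store metrics below; verify each one.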
for _, metric := range []string{"loki_rate_store_refresh_failures_total", "loki_rate_store_streams", "loki_rate_store_max_stream_shards", "loki_rate_store_max_stream_rate_bytes", "loki_rate_store_max_unique_stream_rate_bytes", "loki_stream_sharding_count"} {
e2e.Logf("Checking metric: %s", metric)
checkMetric(oc, bearerToken, metric, 3)
}
g.By("Override default value for desired stream sharding rate on tenants")
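// perStreamDesiredRate is given in MB: the global value 4 should render as a 4MB desired_rate in
// config.yaml, while the per-tenant values 5 and 6 should appear as 5MB/6MB overrides in runtime-config.yaml.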
patchConfig := `
spec:
limits:
global:
ingestion:
perStreamDesiredRate: 4
tenants:
application:
ingestion:
perStreamDesiredRate: 5
audit:
ingestion:
perStreamDesiredRate: 6
`
_, err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("lokistack", ls.name, "-n", ls.namespace, "--type", "merge", "-p", patchConfig).Output()
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
_, err = exec.Command("bash", "-c", "rm -rf "+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("cm/"+ls.name+"-config", "-n", ls.namespace, "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
//Validating config.yaml below
lokiStackConf, err = os.ReadFile(dirname + "/config.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
err = yaml.Unmarshal(lokiStackConf, &lokiLimitsConfig)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(lokiLimitsConfig.LimitsConfig.ShardStreams.Enabled).Should(o.Equal(true))
o.Expect(lokiLimitsConfig.LimitsConfig.ShardStreams.DesiredRate).Should(o.Equal("4MB"))
//Validating runtime-config.yaml below
overridesConfig, err := os.ReadFile(dirname + "/runtime-config.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
runtimeConfig := RuntimeConfig{}
err = yaml.Unmarshal(overridesConfig, &runtimeConfig)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(runtimeConfig.Overrides.Application.ShardStreams.DesiredRate).Should(o.Equal("5MB"))
o.Expect(runtimeConfig.Overrides.Audit.ShardStreams.DesiredRate).Should(o.Equal("6MB"))
e2e.Logf("Overrides validated successfully!")
})
g.It("CPaasrunOnly-Author:kbharti-High-70714-Show warning to user for upgrading to TSDBv3 store and v13 schema[Serial]", func() {
// The alert will only be shown for a t-shirt size of 1x.extra-small and greater
if !validateInfraAndResourcesForLoki(oc, "35Gi", "16") {
g.Skip("Current platform not supported/resources not available for this test!")
}
g.By("Deploy Loki stack with v12 schema and boltdb-shipper store")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "lokistack-70714",
namespace: loggingNS,
tSize: "1x.extra-small",
storageType: s,
storageSecret: "storage-secret-70714",
storageClass: sc,
bucketName: "logging-loki-70714-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err := ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc, "-p", "STORAGE_SCHEMA_VERSION=v12")
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
defer removeClusterRoleFromServiceAccount(oc, ls.namespace, "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, ls.namespace, "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
token := getSAToken(oc, "default", ls.namespace)
queryAlertManagerForActiveAlerts(oc, token, false, "LokistackSchemaUpgradesRequired", 5)
e2e.Logf("Alert LokistackSchemaUpgradesRequired is firing...")
})
g.It("Author:kbharti-CPaasrunOnly-Medium-70685-Validate support for blocking queries on Loki[Serial]", func() {
g.By("Create 3 application generator projects")
oc.SetupProject()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
appProj1 := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj1, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
appProj2 := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj2, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
appProj3 := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj3, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploy Loki stack")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "lokistack-70685",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-70685",
storageClass: sc,
bucketName: "logging-loki-70685-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
// patch spec to block specific queries
patchConfig := `
spec:
limits:
tenants:
application:
queries:
blocked:
- pattern: '{kubernetes_namespace_name="%s"}'
- pattern: '.*%s.*'
regex: true
`
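// appProj1 is blocked by an exact stream-selector pattern, appProj2 by a regex pattern matching any
// query that contains the namespace name; appProj3 has no blocking policy and must stay queryable.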
_, err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("lokistack", ls.name, "-n", ls.namespace, "--type", "merge", "-p", fmt.Sprintf(patchConfig, appProj1, appProj2)).Output()
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-70685",
namespace: loggingNS,
serviceAccountName: "logcollector-70685",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-70685",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
defer removeClusterRoleFromServiceAccount(oc, ls.namespace, "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, ls.namespace, "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", ls.namespace)
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
g.By("Validate queries are blocked as per the spec config")
_, err = lc.searchByNamespace("application", appProj1)
// Cannot query {kubernetes_namespace_name="appProj1"} since this query is blocked by policy
o.Expect(err).To(o.HaveOccurred())
_, err = lc.searchByNamespace("application", appProj2)
// Any query containing appProj2 would be blocked by policy (regex)
o.Expect(err).To(o.HaveOccurred())
//Success since no blocking policy exists on appProj3
lc.waitForLogsAppearByProject("application", appProj3)
})
})
var _ = g.Describe("[sig-openshift-logging] Logging NonPreRelease - LokiStack with tenant level labelkeys", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("lokistack-labelkeys", exutil.KubeConfigPath())
loggingBaseDir, s, sc string
)
g.BeforeEach(func() {
s = getStorageType(oc)
if len(s) == 0 {
g.Skip("Current cluster doesn't have a proper object storage for this test!")
}
sc, _ = getStorageClassName(oc)
if len(sc) == 0 {
g.Skip("The cluster doesn't have a storage class for this test!")
}
loggingBaseDir = exutil.FixturePath("testdata", "logging")
subTemplate := filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml")
CLO := SubscriptionObjects{
OperatorName: "cluster-logging-operator",
Namespace: cloNS,
PackageName: "cluster-logging",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
LO := SubscriptionObjects{
OperatorName: "loki-operator-controller-manager",
Namespace: loNS,
PackageName: "loki-operator",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
g.By("deploy CLO and LO")
CLO.SubscribeOperator(oc)
LO.SubscribeOperator(oc)
})
g.It("Author:kbharti-CPaasrunOnly-Critical-75334-Forward logs to lokiStack via clusterLogForwarder.observability.openshift.io API using per tenant and global labelKeys[Serial]", func() {
var (
loglabeltemplate = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
exutil.By("Create an application")
oc.SetupProject()
user1 := oc.Username()
appProj := oc.Namespace()
userToken, err := oc.Run("whoami").Args("-t").Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create a group and role bindings to access loki logs")
defer oc.AsAdmin().Run("delete").Args("group", "admin-group-75334").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("groups", "new", "admin-group-75334").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("groups", "add-users", "admin-group-75334", user1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer deleteLokiClusterRolesForReadAccess(oc)
createLokiClusterRolesForReadAccess(oc)
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-group", "cluster-logging-infrastructure-view", "admin-group-75334").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-group", "cluster-logging-infrastructure-view", "admin-group-75334").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-group", "cluster-logging-audit-view", "admin-group-75334").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-group", "cluster-logging-audit-view", "admin-group-75334").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-group", "cluster-logging-application-view", "admin-group-75334").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-group", "cluster-logging-application-view", "admin-group-75334").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Deploying LokiStack with adminGroup")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "lokistack-75334",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-75334",
storageClass: sc,
bucketName: "logging-loki-75334-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc, "-p", "ADMIN_GROUPS=[\"admin-group-75334\"]")
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("create a CLF to test forward to Lokistack")
clf := clusterlogforwarder{
name: "instance-75334",
namespace: loggingNS,
serviceAccountName: "logcollector-75334",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack-with-labelkeys.yaml"),
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace, "APP_LABELKEYS=[\"kubernetes.container_name\"]", "IGNORE_GLOBAL_INFRA=true", "INFRA_LABELKEYS=[\"kubernetes.namespace_name\"]", "GLOBAL_LABELKEYS=[\"log_type\"]")
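// labelKeys layout: global=[log_type]; the application tenant adds kubernetes.container_name on top of the
// global keys, while the infrastructure tenant ignores the global keys (IGNORE_GLOBAL_INFRA=true) and only
// keeps kubernetes.namespace_name. The queries below verify which stream labels exist per tenant.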
exutil.By("Check that logs are forwarded to LokiStack")
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(userToken).retry(5)
lc.waitForLogsAppearByKey("audit", "log_type", "audit")
lc.waitForLogsAppearByKey("infrastructure", "kubernetes_namespace_name", "openshift-monitoring")
lc.waitForLogsAppearByKey("application", "log_type", "application")
// Get some pod and container names under extracted infra logs
logs, err := lc.searchByKey("infrastructure", "kubernetes_namespace_name", "openshift-monitoring")
o.Expect(err).NotTo(o.HaveOccurred())
extractedLogs := extractLogEntities(logs)
var infraLogPodNames []string
var infraLogContainerNames []string
for _, log := range extractedLogs {
infraLogPodNames = append(infraLogPodNames, log.Kubernetes.PodName)
infraLogContainerNames = append(infraLogContainerNames, log.Kubernetes.ContainerName)
}
exutil.By("Validating application logs with labelKeys")
// Since global labelkeys is defined as 'log_type' and application labelkeys is defined as 'kubernetes.container_name' with ignoreGlobal as 'false',
// application tenant can be queried with 'log_type' and 'kubernetes_container_name' keys only.
// Query with key 'kubernetes_namespace_name' - should yield an empty response
logs, err = lc.searchByKey("application", "kubernetes_namespace_name", appProj)
o.Expect(err).NotTo(o.HaveOccurred())
extractedLogs = extractLogEntities(logs)
o.Expect(len(extractedLogs) == 0).Should(o.BeTrue())
// Query with key 'kubernetes_pod_name' - should yield an empty response
podList, err := oc.AdminKubeClient().CoreV1().Pods(appProj).List(context.Background(), metav1.ListOptions{LabelSelector: "run=centos-logtest"})
o.Expect(err).NotTo(o.HaveOccurred())
logs, err = lc.searchByKey("application", "kubernetes_pod_name", podList.Items[0].Name)
o.Expect(err).NotTo(o.HaveOccurred())
extractedLogs = extractLogEntities(logs)
o.Expect(len(extractedLogs) == 0).Should(o.BeTrue())
// Query with key 'kubernetes_container_name' - should yield a NON empty response
logs, err = lc.searchByKey("application", "kubernetes_container_name", "logging-centos-logtest")
o.Expect(err).NotTo(o.HaveOccurred())
extractedLogs = extractLogEntities(logs)
o.Expect(len(extractedLogs) != 0).Should(o.BeTrue())
e2e.Logf("Validation with application labelKeys was successful")
exutil.By("Validating infrastructure log streams with labelKeys")
// Since global labelkeys is defined as 'log_type' BUT infrastructure labelkeys is defined as 'kubernetes.namespace_name' with ignoreGlobal as 'true',
// Infrastructure tenant can be queried with 'kubernetes_namespace_name' key only.
// Query with key 'log_type' - should yield an empty response
logs, err = lc.searchByKey("infrastructure", "log_type", "infrastructure")
o.Expect(err).NotTo(o.HaveOccurred())
extractedLogs = extractLogEntities(logs)
o.Expect(len(extractedLogs) == 0).Should(o.BeTrue())
// Query with key 'kubernetes_pod_name' - should yield an empty response
for _, pod := range infraLogPodNames {
logs, err = lc.searchByKey("infrastructure", "kubernetes_pod_name", pod)
o.Expect(err).NotTo(o.HaveOccurred())
extractedLogs = extractLogEntities(logs)
o.Expect(len(extractedLogs) == 0).Should(o.BeTrue())
}
// Query with key 'kubernetes_container_name' - should yield an empty response
for _, container := range infraLogContainerNames {
logs, err := lc.searchByKey("infrastructure", "kubernetes_container_name", container)
o.Expect(err).NotTo(o.HaveOccurred())
extractedLogs = extractLogEntities(logs)
o.Expect(len(extractedLogs) == 0).Should(o.BeTrue())
}
e2e.Logf("Validation with infrastructure labelKeys was successful")
})
g.It("Author:kbharti-CPaasrunOnly-High-75369-Forward logs to lokiStack via ClusterLogForwarder.observability.openshift.io API using per tenant keys and no global overrides[Serial]", func() {
var (
loglabeltemplate = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
exutil.By("Create an application")
oc.SetupProject()
user1 := oc.Username()
appProj := oc.Namespace()
userToken, err := oc.Run("whoami").Args("-t").Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create a group and role bindings to access loki logs")
defer oc.AsAdmin().Run("delete").Args("group", "admin-group-75369").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("groups", "new", "admin-group-75369").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("groups", "add-users", "admin-group-75369", user1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer deleteLokiClusterRolesForReadAccess(oc)
createLokiClusterRolesForReadAccess(oc)
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-group", "cluster-logging-infrastructure-view", "admin-group-75369").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-group", "cluster-logging-infrastructure-view", "admin-group-75369").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-group", "cluster-logging-audit-view", "admin-group-75369").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-group", "cluster-logging-audit-view", "admin-group-75369").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-group", "cluster-logging-application-view", "admin-group-75369").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-group", "cluster-logging-application-view", "admin-group-75369").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Deploying LokiStack with adminGroup")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "lokistack-75369",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-75369",
storageClass: sc,
bucketName: "logging-loki-75369-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc, "-p", "ADMIN_GROUPS=[\"admin-group-75369\"]")
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("create a CLF to test forward to Lokistack")
clf := clusterlogforwarder{
name: "instance-75369",
namespace: loggingNS,
serviceAccountName: "logcollector-75369",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack-with-labelkeys.yaml"),
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace, "APP_LABELKEYS=[\"kubernetes.labels.test\"]", "IGNORE_GLOBAL_INFRA=true", "INFRA_LABELKEYS=[\"kubernetes.namespace_name\"]", "GLOBAL_LABELKEYS=[]")
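// labelKeys layout: global is left empty so the default keys apply; the application tenant adds
// kubernetes.labels.test to the defaults, while the infrastructure tenant ignores the defaults and only
// keeps kubernetes.namespace_name (IGNORE_GLOBAL_INFRA=true).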
exutil.By("Check that logs are forwarded to LokiStack")
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(userToken).retry(5)
lc.waitForLogsAppearByKey("audit", "log_type", "audit")
lc.waitForLogsAppearByKey("application", "log_type", "application")
lc.waitForLogsAppearByKey("infrastructure", "kubernetes_namespace_name", "openshift-monitoring")
// Get some pod and container names under extracted infra logs
logs, err := lc.searchByKey("infrastructure", "kubernetes_namespace_name", "openshift-monitoring")
o.Expect(err).NotTo(o.HaveOccurred())
extractedLogs := extractLogEntities(logs)
var infraLogPodNames []string
var infraLogContainerNames []string
for _, log := range extractedLogs {
infraLogPodNames = append(infraLogPodNames, log.Kubernetes.PodName)
infraLogContainerNames = append(infraLogContainerNames, log.Kubernetes.ContainerName)
}
exutil.By("Validating application logs with labelKeys")
// Since global labelkeys are 'undefined/not overridden' and application labelkeys is defined as 'kubernetes.labels.test' with ignoreGlobal as 'false',
// application tenant can be queried with the default labelKeys and 'kubernetes.labels.test' keys.
// Query with key 'kubernetes_namespace_name' - should yield a NON empty response
logs, err = lc.searchByKey("application", "kubernetes_namespace_name", appProj)
o.Expect(err).NotTo(o.HaveOccurred())
extractedLogs = extractLogEntities(logs)
o.Expect(len(extractedLogs) != 0).Should(o.BeTrue())
// Query with key 'kubernetes_pod_name' - should yield a NON empty response
podList, err := oc.AdminKubeClient().CoreV1().Pods(appProj).List(context.Background(), metav1.ListOptions{LabelSelector: "run=centos-logtest"})
o.Expect(err).NotTo(o.HaveOccurred())
logs, err = lc.searchByKey("application", "kubernetes_pod_name", podList.Items[0].Name)
o.Expect(err).NotTo(o.HaveOccurred())
extractedLogs = extractLogEntities(logs)
o.Expect(len(extractedLogs) != 0).Should(o.BeTrue())
// Query with key 'kubernetes_container_name' - should yield a NON empty response
logs, err = lc.searchByKey("application", "kubernetes_container_name", "logging-centos-logtest")
o.Expect(err).NotTo(o.HaveOccurred())
extractedLogs = extractLogEntities(logs)
o.Expect(len(extractedLogs) != 0).Should(o.BeTrue())
// Query with key 'kubernetes.labels.test' - should yield a NON empty response
logs, err = lc.searchByKey("application", "kubernetes_labels_test", "centos-logtest")
o.Expect(err).NotTo(o.HaveOccurred())
extractedLogs = extractLogEntities(logs)
o.Expect(len(extractedLogs) != 0).Should(o.BeTrue())
e2e.Logf("Validation with application labelKeys was successful")
exutil.By("Validating infrastructure log streams with labelKeys")
// Since global labelkeys is 'undefined/not overridden' BUT infrastructure labelkeys is defined as 'kubernetes.namespace_name' with ignoreGlobal as 'true',
// Infrastructure tenant can be queried with 'kubernetes_namespace_name' key only.
// Query with key 'log_type' - should yield an empty response
logs, err = lc.searchByKey("infrastructure", "log_type", "infrastructure")
o.Expect(err).NotTo(o.HaveOccurred())
extractedLogs = extractLogEntities(logs)
o.Expect(len(extractedLogs) == 0).Should(o.BeTrue())
// Query with key 'kubernetes_pod_name' - should yield an empty response
for _, pod := range infraLogPodNames {
logs, err = lc.searchByKey("infrastructure", "kubernetes_pod_name", pod)
o.Expect(err).NotTo(o.HaveOccurred())
extractedLogs = extractLogEntities(logs)
o.Expect(len(extractedLogs) == 0).Should(o.BeTrue())
}
// Query with key 'kubernetes_container_name' - should yield an empty response
for _, container := range infraLogContainerNames {
logs, err := lc.searchByKey("infrastructure", "kubernetes_container_name", container)
o.Expect(err).NotTo(o.HaveOccurred())
extractedLogs = extractLogEntities(logs)
o.Expect(len(extractedLogs) == 0).Should(o.BeTrue())
}
e2e.Logf("Validation with infrastructure labelKeys was successful")
})
})
var _ = g.Describe("[sig-openshift-logging] Logging NonPreRelease - LokiStack with OTLP support", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("lokistack-otlp-flow", exutil.KubeConfigPath())
loggingBaseDir, s, sc string
)
g.BeforeEach(func() {
s = getStorageType(oc)
if len(s) == 0 {
g.Skip("Current cluster doesn't have a proper object storage for this test!")
}
sc, _ = getStorageClassName(oc)
if len(sc) == 0 {
g.Skip("The cluster doesn't have a storage class for this test!")
}
loggingBaseDir = exutil.FixturePath("testdata", "logging")
subTemplate := filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml")
CLO := SubscriptionObjects{
OperatorName: "cluster-logging-operator",
Namespace: cloNS,
PackageName: "cluster-logging",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
LO := SubscriptionObjects{
OperatorName: "loki-operator-controller-manager",
Namespace: loNS,
PackageName: "loki-operator",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
g.By("deploy CLO and LO")
CLO.SubscribeOperator(oc)
LO.SubscribeOperator(oc)
})
g.It("Author:kbharti-CPaasrunOnly-Critical-76990-Verify that LokiStack provides a default set of otlp configuration[Serial]", func() {
var (
loglabeltemplate = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
exutil.By("Create an application")
oc.SetupProject()
user1 := oc.Username()
appProj := oc.Namespace()
userToken, err := oc.Run("whoami").Args("-t").Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create a group and role bindings to access loki logs")
defer oc.AsAdmin().Run("delete").Args("group", "admin-group-76990").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("groups", "new", "admin-group-76990").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("groups", "add-users", "admin-group-76990", user1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer deleteLokiClusterRolesForReadAccess(oc)
createLokiClusterRolesForReadAccess(oc)
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-group", "cluster-logging-infrastructure-view", "admin-group-76990").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-group", "cluster-logging-infrastructure-view", "admin-group-76990").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-group", "cluster-logging-audit-view", "admin-group-76990").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-group", "cluster-logging-audit-view", "admin-group-76990").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-group", "cluster-logging-application-view", "admin-group-76990").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-group", "cluster-logging-application-view", "admin-group-76990").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Deploying LokiStack with adminGroup")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "lokistack-76990",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-76990",
storageClass: sc,
bucketName: "logging-loki-76990-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc, "ADMIN_GROUPS=[\"admin-group-76990\"]")
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("create a CLF to test forward to Lokistack")
clf := clusterlogforwarder{
name: "instance-76990",
namespace: loggingNS,
serviceAccountName: "logcollector-76990",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-76990",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace, "DATAMODEL=Otel", `TUNING={"compression": "none"}`)
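// DATAMODEL=Otel selects the OTel data model for the lokiStack output, so logs are written with OTel
// semantic-convention attributes (validated below via openshift_log_type/k8s_* queries); compression is disabled.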
exutil.By("Extracting Loki config ...")
dirname := "/tmp/" + oc.Namespace() + "-lokistack-otlp-support"
defer os.RemoveAll(dirname)
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("cm/"+ls.name+"-config", "-n", ls.namespace, "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Validate the default OTLP configuration under lokiStack config")
lokiStackConf, err := os.ReadFile(dirname + "/config.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
lokiLimitsConfig := LokiLimitsConfig{}
err = yaml.Unmarshal(lokiStackConf, &lokiLimitsConfig)
o.Expect(err).NotTo(o.HaveOccurred())
// Default expected OTLP configuration under limits_config
defaultOTLPConfig := `
resource_attributes:
attributes_config:
- action: index_label
attributes:
- k8s.container.name
- k8s.cronjob.name
- k8s.daemonset.name
- k8s.deployment.name
- k8s.job.name
- k8s.namespace.name
- k8s.node.name
- k8s.pod.name
- k8s.statefulset.name
- kubernetes.container_name
- kubernetes.host
- kubernetes.namespace_name
- kubernetes.pod_name
- log_source
- log_type
- openshift.cluster.uid
- openshift.log.source
- openshift.log.type
- service.name
- action: structured_metadata
attributes:
- k8s.node.uid
- k8s.pod.uid
- k8s.replicaset.name
- process.command_line
- process.executable.name
- process.executable.path
- process.pid
- action: structured_metadata
regex: k8s\.pod\.labels\..+
- action: structured_metadata
regex: openshift\.labels\..+
log_attributes:
- action: structured_metadata
attributes:
- k8s.event.level
- k8s.event.object_ref.api.group
- k8s.event.object_ref.api.version
- k8s.event.object_ref.name
- k8s.event.object_ref.resource
- k8s.event.request.uri
- k8s.event.response.code
- k8s.event.stage
- k8s.event.user_agent
- k8s.user.groups
- k8s.user.username
- level
- log.iostream
- action: structured_metadata
regex: k8s\.event\.annotations\..+
- action: structured_metadata
regex: systemd\.t\..+
- action: structured_metadata
regex: systemd\.u\..+`
var staticOtlpConfig OtlpConfig
err = yaml.Unmarshal([]byte(defaultOTLPConfig), &staticOtlpConfig)
o.Expect(err).NotTo(o.HaveOccurred())
if reflect.DeepEqual(lokiLimitsConfig.LimitsConfig.OtlpConfig, staticOtlpConfig) {
e2e.Logf("Validated expected default OTLP configuration under lokistack config")
} else {
e2e.Failf("Incorrect default OTLP configuration found. Failing case..")
}
// check logs in LokiStack by querying with OTel semantic-convention attributes
exutil.By("Check logs are received with OTLP semantic convention attributes in loki")
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(userToken).retry(5)
for _, logType := range []string{"application", "infrastructure", "audit"} {
lc.waitForLogsAppearByKey(logType, "openshift_log_type", logType)
}
lc.waitForLogsAppearByKey("application", "k8s_namespace_name", appProj)
lc.waitForLogsAppearByKey("infrastructure", "k8s_namespace_name", "openshift-monitoring")
lc.waitForLogsAppearByKey("application", "k8s_container_name", "logging-centos-logtest")
exutil.By("Validate log streams are pushed to external storage bucket/container")
ls.validateExternalObjectStorageForLogs(oc, []string{"application", "audit", "infrastructure"})
})
g.It("Author:kbharti-CPaasrunOnly-High-77345-Verify that LokiStack provides a custom set of otlp configuration with global and per tenant[Serial]", func() {
var (
loglabeltemplate = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
exutil.By("Create an application")
oc.SetupProject()
user1 := oc.Username()
appProj := oc.Namespace()
userToken, err := oc.Run("whoami").Args("-t").Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create a group and role bindings to access loki logs")
defer oc.AsAdmin().Run("delete").Args("group", "admin-group-77345").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("groups", "new", "admin-group-77345").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("groups", "add-users", "admin-group-77345", user1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer deleteLokiClusterRolesForReadAccess(oc)
createLokiClusterRolesForReadAccess(oc)
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-group", "cluster-logging-infrastructure-view", "admin-group-77345").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-group", "cluster-logging-infrastructure-view", "admin-group-77345").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-group", "cluster-logging-audit-view", "admin-group-77345").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-group", "cluster-logging-audit-view", "admin-group-77345").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-group", "cluster-logging-application-view", "admin-group-77345").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-group", "cluster-logging-application-view", "admin-group-77345").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Deploying LokiStack with adminGroup")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "lokistack-77345",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-77345",
storageClass: sc,
bucketName: "logging-loki-77345-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc, "ADMIN_GROUPS=[\"admin-group-77345\"]")
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
// Patch LokiStack CR with a custom otlp configuration
// Here disableRecommendedAttributes enables only the required stream labels when 'true'
customOTLPconfig := `{
"spec": {
"limits": {
"tenants": {
"application": {
"otlp": {
"streamLabels": {
"resourceAttributes": [
{ "name": "k8s.pod.name" }
]
},
"structuredMetadata": {
"logAttributes": [
{ "name": "k8s.pod.uid" }
]
}
}
},
"infrastructure": {
"otlp": {
"streamLabels": {
"resourceAttributes": [
{ "name": "k8s.container.name" }
]
},
"structuredMetadata": {
"logAttributes": [
{ "name": "log.iostream" }
]
}
}
}
}
},
"tenants": {
"mode": "openshift-logging",
"openshift": {
"otlp": {
"disableRecommendedAttributes": true
}
}
}
}
}`
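// The patch adds per-tenant OTLP overrides: application promotes k8s.pod.name to a stream label and keeps
// k8s.pod.uid as structured metadata, infrastructure does the same for k8s.container.name/log.iostream,
// and disableRecommendedAttributes trims the global attribute list down to the required set.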
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("lokistack", ls.name, "-n", ls.namespace, "--type", "merge", "-p", customOTLPconfig).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("create a CLF to test forward to Lokistack")
clf := clusterlogforwarder{
name: "instance-77345",
namespace: loggingNS,
serviceAccountName: "logcollector-77345",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-77345",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace, "DATAMODEL=Otel", `TUNING={"compression": "none"}`)
exutil.By("Extracting Loki config ...")
dirname := "/tmp/" + oc.Namespace() + "-lokistack-otlp-support"
defer os.RemoveAll(dirname)
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("cm/"+ls.name+"-config", "-n", ls.namespace, "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Validate the default OTLP configuration under lokiStack config")
lokiStackConf, err := os.ReadFile(dirname + "/config.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
lokiLimitsConfig := LokiLimitsConfig{}
err = yaml.Unmarshal(lokiStackConf, &lokiLimitsConfig)
o.Expect(err).NotTo(o.HaveOccurred())
// default OTLP config when disableRecommendedAttributes is enabled.
defaultOTLPConfig := `
resource_attributes:
attributes_config:
- action: index_label
attributes:
- k8s.namespace.name
- kubernetes.namespace_name
- log_source
- log_type
- openshift.cluster.uid
- openshift.log.source
- openshift.log.type`
var staticOtlpConfig OtlpConfig
err = yaml.Unmarshal([]byte(defaultOTLPConfig), &staticOtlpConfig)
o.Expect(err).NotTo(o.HaveOccurred())
if reflect.DeepEqual(lokiLimitsConfig.LimitsConfig.OtlpConfig, staticOtlpConfig) {
e2e.Logf("Validated expected default OTLP configuration under lokistack config")
} else {
e2e.Failf("Incorrect default OTLP configuration found. Failing case..")
}
exutil.By("Validate the per tenant OTLP configuration under lokiStack overrides config")
lokiStackConf, err = os.ReadFile(dirname + "/runtime-config.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
// Application tenant
customOtlpconfigForApp := `
resource_attributes:
attributes_config:
- action: index_label
attributes:
- k8s.namespace.name
- k8s.pod.name
- kubernetes.namespace_name
- log_source
- log_type
- openshift.cluster.uid
- openshift.log.source
- openshift.log.type
log_attributes:
- action: structured_metadata
attributes:
- k8s.pod.uid`
err = yaml.Unmarshal([]byte(customOtlpconfigForApp), &staticOtlpConfig)
o.Expect(err).NotTo(o.HaveOccurred())
runtimeConfig := RuntimeConfig{}
err = yaml.Unmarshal(lokiStackConf, &runtimeConfig)
o.Expect(err).NotTo(o.HaveOccurred())
if reflect.DeepEqual(runtimeConfig.Overrides.Application.OtlpConfig, staticOtlpConfig) {
fmt.Println("Validated expected custom OTLP configuration for tenant: application")
} else {
e2e.Failf("Incorrect custom OTLP configuration found for tenant: application. Failing case..")
}
// Infrastructure tenant
customOtlpconfigForInfra := `
resource_attributes:
attributes_config:
- action: index_label
attributes:
- k8s.container.name
- k8s.namespace.name
- kubernetes.namespace_name
- log_source
- log_type
- openshift.cluster.uid
- openshift.log.source
- openshift.log.type
log_attributes:
- action: structured_metadata
attributes:
- log.iostream`
err = yaml.Unmarshal([]byte(customOtlpconfigForInfra), &staticOtlpConfig)
o.Expect(err).NotTo(o.HaveOccurred())
if reflect.DeepEqual(runtimeConfig.Overrides.Infrastructure.OtlpConfig, staticOtlpConfig) {
fmt.Println("Validated expected custom OTLP configuration for tenant: infrastructure")
} else {
e2e.Failf("Incorrect custom OTLP configuration found for tenant: infrastructure. Failing case..")
}
exutil.By("Check logs are received with OTLP semantic convention attributes in loki")
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(userToken).retry(5)
for _, logType := range []string{"application", "infrastructure", "audit"} {
lc.waitForLogsAppearByKey(logType, "openshift_log_type", logType)
}
lc.waitForLogsAppearByKey("application", "k8s_namespace_name", appProj)
lc.waitForLogsAppearByKey("infrastructure", "k8s_namespace_name", "openshift-monitoring")
// No logs found for app tenant with k8s_container_name streamLabel/labelKey since it is not included under custom overrides config
logs, err := lc.searchByKey("application", "k8s_container_name", "logging-centos-logtest")
o.Expect(err).NotTo(o.HaveOccurred())
extractedLogs := extractLogEntities(logs)
o.Expect(len(extractedLogs) == 0).Should(o.BeTrue())
exutil.By("Validate log streams are pushed to external storage bucket/container")
ls.validateExternalObjectStorageForLogs(oc, []string{"application", "audit", "infrastructure"})
})
})
|
package logging
| ||||
test case
|
openshift/openshift-tests-private
|
3bed292d-cbe4-4a10-98f5-e417a8a4c120
|
Author:qitang-CPaasrunOnly-Medium-76114-Controlling log flow rates per container from selected containers by containerLimit.[Serial][Slow]
|
['"fmt"', '"path/filepath"', '"strconv"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
|
g.It("Author:qitang-CPaasrunOnly-Medium-76114-Controlling log flow rates per container from selected containers by containerLimit.[Serial][Slow]", func() {
if !validateInfraForLoki(oc) {
g.Skip("Current platform not supported!")
}
exutil.By("Create 3 pods in one project")
multiplePods := oc.Namespace()
for i := 0; i < 3; i++ {
err := oc.WithoutNamespace().Run("new-app").Args("-n", multiplePods, "-f", jsonLogFile, "-p", "RATE=3000", "-p", "CONFIGMAP=logtest-config-"+strconv.Itoa(i), "-p", "REPLICATIONCONTROLLER=logging-centos-logtest-"+strconv.Itoa(i)).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("Create 3 projects and create one pod in each project")
namespaces := []string{}
for i := 0; i < 3; i++ {
nsName := "logging-flow-control-" + getRandomString()
namespaces = append(namespaces, nsName)
oc.CreateSpecifiedNamespaceAsAdmin(nsName)
defer oc.DeleteSpecifiedNamespaceAsAdmin(nsName)
err := oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-n", nsName, "-f", jsonLogFile, "-p", "RATE=3000", "-p", "LABELS={\"logging-flow-control\": \"centos-logtest\"}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("Create a pod with 3 containers")
multiContainer := filepath.Join(loggingBaseDir, "generatelog", "multi_container_json_log_template.yaml")
oc.SetupProject()
multipleContainers := oc.Namespace()
err := oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-n", multipleContainers, "-f", multiContainer, "-p", "RATE=3000", "-p", "LABELS={\"multiple-containers\": \"centos-logtest\"}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("deploy loki stack")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "lokistack-76114",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-76114",
storageClass: sc,
bucketName: "logging-loki-76114-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-76114",
namespace: loggingNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
serviceAccountName: "logcollector-76114",
secretName: "lokistack-secret-76114",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace, "INPUT_REFS=[\"application\"]")
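// Patch the CLF with three application inputs, each throttled per container: 10 records/s for the
// multi-pod namespace, 20 records/s for the logging-flow-control namespaces (by label selector) and
// 30 records/s for the multi-container pod; the pipeline is switched to these inputs.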
patch := fmt.Sprintf(`[{"op": "add", "path": "/spec/inputs", "value": [{"application": {"includes": [{"namespace": "%s"}], "tuning": {"rateLimitPerContainer": {"maxRecordsPerSecond": 10}}}, "name": "limited-rates-1", "type": "application"}, {"application": {"selector": {"matchLabels": {"logging-flow-control": "centos-logtest"}}, "tuning": {"rateLimitPerContainer": {"maxRecordsPerSecond": 20}}}, "name": "limited-rates-2", "type": "application"}, {"application": {"selector": {"matchLabels": {"multiple-containers": "centos-logtest"}}, "tuning": {"rateLimitPerContainer": {"maxRecordsPerSecond": 30}}}, "name": "limited-rates-3", "type": "application"}]},{ "op": "replace", "path": "/spec/pipelines/0/inputRefs", "value": ["limited-rates-1","limited-rates-2","limited-rates-3"]}]`, multiplePods)
clf.update(oc, "", patch, "--type=json")
clf.waitForCollectorPodsReady(oc)
// sleep 3 minutes for the logs to be collected
time.Sleep(3 * time.Minute)
exutil.By("Check data in lokistack")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
//ensure logs from each project are collected
for _, ns := range namespaces {
lc.waitForLogsAppearByProject("application", ns)
}
lc.waitForLogsAppearByProject("application", multipleContainers)
lc.waitForLogsAppearByProject("application", multiplePods)
exutil.By("for logs in project/" + multiplePods + ", the count of each container in one minute should be ~10*60")
re, _ := lc.query("application", "sum by(kubernetes_pod_name)(count_over_time({kubernetes_namespace_name=\""+multiplePods+"\"}[1m]))", 30, false, time.Now())
o.Expect(len(re.Data.Result) > 0).Should(o.BeTrue())
for _, r := range re.Data.Result {
// check the penultimate value
v := r.Values[len(r.Values)-2]
c := convertInterfaceToArray(v)[1]
count, err := strconv.Atoi(c)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(count <= 650).To(o.BeTrue(), fmt.Sprintf("the count is %d, however the expected value is ~600", count))
}
exutil.By("for logs in projects logging-flow-control-*, the count of each container in one minute should be ~20*60")
// get `400 Bad Request` when querying with `sum by(kubernetes_pod_name)(count_over_time({kubernetes_namespace_name=~\"logging-flow-control-.+\"}[1m]))`
for _, ns := range namespaces {
res, _ := lc.query("application", "sum by(kubernetes_pod_name)(count_over_time({kubernetes_namespace_name=\""+ns+"\"}[1m]))", 30, false, time.Now())
o.Expect(len(res.Data.Result) > 0).Should(o.BeTrue())
for _, r := range res.Data.Result {
// check the penultimate value
v := r.Values[len(r.Values)-2]
c := convertInterfaceToArray(v)[1]
count, _ := strconv.Atoi(c)
o.Expect(count <= 1300).To(o.BeTrue(), fmt.Sprintf("the count is %d, however the expected value is ~1200", count))
}
}
exutil.By("for logs in project/" + multipleContainers + ", the count of each container in one minute should be ~30*60")
res, _ := lc.query("application", "sum by(kubernetes_container_name)(count_over_time({kubernetes_namespace_name=\""+multipleContainers+"\"}[1m]))", 30, false, time.Now())
o.Expect(len(res.Data.Result) > 0).Should(o.BeTrue())
for _, r := range res.Data.Result {
// check the penultimate value
v := r.Values[len(r.Values)-2]
c := convertInterfaceToArray(v)[1]
count, _ := strconv.Atoi(c)
o.Expect(count <= 1950).To(o.BeTrue(), fmt.Sprintf("the count is %d, however the expected value is ~1800", count))
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
fbb66cea-6231-4a75-97a6-1ffb12471e49
|
Author:qitang-CPaasrunOnly-Medium-76115-Controlling the flow rate per destination to selected outputs.[Serial][Slow]
|
['"fmt"', '"path/filepath"', '"strconv"', '"strings"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
|
g.It("Author:qitang-CPaasrunOnly-Medium-76115-Controlling the flow rate per destination to selected outputs.[Serial][Slow]", func() {
if !validateInfraForLoki(oc) {
g.Skip("Current platform not supported!")
}
exutil.By("Create pod to generate some logs")
appProj := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile, "-p", "RATE=3000", "-p", "REPLICAS=3").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
podNodeName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", appProj, "-ojsonpath={.items[*].spec.nodeName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeNames := strings.Split(podNodeName, " ")
exutil.By("deploy loki stack")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "lokistack-76115",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-76115",
storageClass: sc,
bucketName: "logging-loki-76115-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("Deploy non-logging managed log stores")
oc.SetupProject()
loki := externalLoki{
name: "loki-server",
namespace: oc.Namespace(),
}
defer loki.remove(oc)
loki.deployLoki(oc)
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-76115",
namespace: loggingNS,
serviceAccountName: "logcollector-76115",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-76115",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
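// Add a second, non-managed loki output with an output-level rateLimit of 10 records/s; the lokistack
// output keeps no rate limit, so only the external Loki server should see throttled log rates.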
patch := `[{"op": "add", "path": "/spec/outputs/-", "value": {"name":"loki-server","type":"loki", "loki": {"url":"http://` + loki.name + `.` + loki.namespace + `.svc:3100"}, "rateLimit": {"maxRecordsPerSecond": 10}}}, {"op": "add", "path": "/spec/pipelines/0/outputRefs/-", "value": "loki-server"}]`
clf.update(oc, "", patch, "--type=json")
clf.waitForCollectorPodsReady(oc)
// sleep 3 minutes for the logs to be collected
time.Sleep(3 * time.Minute)
exutil.By("check data in user-managed loki, the count of logs from each node in one minute should be ~10*60")
route := "http://" + getRouteAddress(oc, loki.namespace, loki.name)
lc := newLokiClient(route)
lc.waitForLogsAppearByProject("", appProj)
res, _ := lc.query("", "sum by(kubernetes_host)(count_over_time({log_type=~\".+\"}[1m]))", 30, false, time.Now())
o.Expect(len(res.Data.Result) > 0).Should(o.BeTrue())
for _, r := range res.Data.Result {
// check the penultimate value
v := r.Values[len(r.Values)-2]
c := convertInterfaceToArray(v)[1]
count, _ := strconv.Atoi(c)
o.Expect(count <= 650).To(o.BeTrue(), fmt.Sprintf("the count is %d, however the expected value is ~600", count))
}
exutil.By("check data in lokistack, there should be no rate limitation")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
routeLokiStack := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lokistackClient := newLokiClient(routeLokiStack).withToken(bearerToken).retry(5)
for _, nodeName := range nodeNames {
// only check app logs, because for infra and audit logs we don't know how many logs OCP generates in one minute
res, _ := lokistackClient.query("application", "sum by(kubernetes_host)(count_over_time({kubernetes_host=\""+nodeName+"\"}[1m]))", 30, false, time.Now())
o.Expect(len(res.Data.Result) > 0).Should(o.BeTrue())
for _, r := range res.Data.Result {
v := r.Values[len(r.Values)-2]
c := convertInterfaceToArray(v)[1]
count, _ := strconv.Atoi(c)
o.Expect(count >= 2900).Should(o.BeTrue())
}
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
4bf23673-01c1-4a0a-a46d-38764c3e0156
|
Author:qitang-CPaasrunOnly-Medium-65195-Controlling log flow rates - different output with different rate
|
['"fmt"', '"path/filepath"', '"strconv"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
|
g.It("Author:qitang-CPaasrunOnly-Medium-65195-Controlling log flow rates - different output with different rate", func() {
exutil.By("Create pod to generate some logs")
appProj := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile, "-p", "RATE=3000").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Deploy non-logging managed log stores")
oc.SetupProject()
logStoresNS := oc.Namespace()
loki := externalLoki{
name: "loki-server",
namespace: logStoresNS,
}
defer loki.remove(oc)
loki.deployLoki(oc)
es := externalES{
namespace: logStoresNS,
version: "8",
serverName: "elasticsearch-8",
loggingNS: logStoresNS,
}
defer es.remove(oc)
es.deploy(oc)
rsyslog := rsyslog{
serverName: "rsyslog",
namespace: logStoresNS,
tls: false,
loggingNS: logStoresNS,
}
defer rsyslog.remove(oc)
rsyslog.deploy(oc)
exutil.By("Create ClusterLogForwarder")
clf := clusterlogforwarder{
name: "clf-65195",
namespace: logStoresNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "loki.yaml"),
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "URL=http://"+loki.name+"."+loki.namespace+".svc:3100")
patch := fmt.Sprintf(`{"spec": {"outputs": [{"name":"loki-server","type":"loki","loki":{"url":"http://%s.%s.svc:3100"},"rateLimit": {"maxRecordsPerSecond": 20}}, {"name":"rsyslog-server","type":"syslog","syslog":{"url":"udp://%s.%s.svc:514","rfc":"RFC5424"},"rateLimit": {"maxRecordsPerSecond": 30}}, {"name":"elasticsearch-server","type":"elasticsearch","rateLimit":{"maxRecordsPerSecond": 10},"elasticsearch":{"version":8,"url":"http://%s.%s.svc:9200","index":"{.log_type||\"none-typed-logs\"}"}}]}}`, loki.name, loki.namespace, rsyslog.serverName, rsyslog.namespace, es.serverName, es.namespace)
clf.update(oc, "", patch, "--type=merge")
outputRefs := `[{"op": "replace", "path": "/spec/pipelines/0/outputRefs", "value": ["loki-server", "rsyslog-server", "elasticsearch-server"]}]`
clf.update(oc, "", outputRefs, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
exutil.By("check collector pods' configuration")
lokiConfig := `[transforms.output_loki_server_throttle]
type = "throttle"
inputs = ["pipeline_forward_to_loki_viaqdedot_2"]
window_secs = 1
threshold = 20`
rsyslogConfig := `[transforms.output_rsyslog_server_throttle]
type = "throttle"
inputs = ["pipeline_forward_to_loki_viaqdedot_2"]
window_secs = 1
threshold = 30`
esConfig := `[transforms.output_elasticsearch_server_throttle]
type = "throttle"
inputs = ["pipeline_forward_to_loki_viaqdedot_2"]
window_secs = 1
threshold = 10`
result, err := checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", lokiConfig, rsyslogConfig, esConfig)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue(), "some of the configuration is not in vector.toml")
// sleep 3 minutes for logs to be collected
time.Sleep(3 * time.Minute)
exutil.By("check data in loki, the count of logs from each node in one minute should be ~20*60")
route := "http://" + getRouteAddress(oc, loki.namespace, loki.name)
lc := newLokiClient(route)
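// With maxRecordsPerSecond=20 on the loki output, each node should send roughly 20*60=1200 records per minute; 1300 leaves room for a small overhead.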
res, _ := lc.query("", "sum by(kubernetes_host)(count_over_time({log_type=~\".+\"}[1m]))", 30, false, time.Now())
o.Expect(len(res.Data.Result) > 0).Should(o.BeTrue())
for _, r := range res.Data.Result {
// check the penultimate value
v := r.Values[len(r.Values)-2]
c := convertInterfaceToArray(v)[1]
count, _ := strconv.Atoi(c)
o.Expect(count <= 1300).To(o.BeTrue(), fmt.Sprintf("the count is %d, however the expected value is ~1200", count))
}
//TODO: find a way to check the doc count in rsyslog and es8
/*
exutil.By("check data in ES, the count of logs from each node in one minute should be ~10*60")
for _, node := range nodeNames {
query := `{"query": {"bool": {"must": [{"match_phrase": {"hostname.keyword": "` + node + `"}}, {"range": {"@timestamp": {"gte": "now-1m/m", "lte": "now/m"}}}]}}}`
count, err := es.getDocCount(oc, "", query)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(count <= 700).Should(o.BeTrue(), fmt.Sprintf("The increased count in %s in 1 minute is: %d", node, count))
}
*/
})
| |||||
test case
|
openshift/openshift-tests-private
|
53f78338-0b9d-4f8b-81f4-3f11992de1a8
|
Author:qitang-CPaasrunOnly-Critical-75841-Filter audit logs and forward to log store.[Serial]
|
['"path/filepath"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
|
g.It("Author:qitang-CPaasrunOnly-Critical-75841-Filter audit logs and forward to log store.[Serial]", func() {
exutil.By("Deploying LokiStack")
ls := lokiStack{
name: "loki-75841",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-75841",
storageClass: sc,
bucketName: "logging-loki-75841-" + getInfrastructureName(oc),
template: filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml"),
}
defer ls.removeObjectStorage(oc)
err := ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-75841",
namespace: loggingNS,
serviceAccountName: "logcollector-75841",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "audit-policy.yaml"),
secretName: "lokistack-secret-75841",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
enableMonitoring: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
exutil.By("wait for audit logs to be collected")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
// sleep 3 minutes for logs to be collected
time.Sleep(3 * time.Minute)
lc.waitForLogsAppearByKey("audit", "log_type", "audit")
exutil.By("check if the audit policy is applied to audit logs or not")
// 404, 409, 422, 429
e2e.Logf("should not find logs with responseStatus.code: 404/409/422/429")
for _, code := range []string{"404", "409", "422", "429"} {
log, err := lc.searchLogsInLoki("audit", "{log_type=\"audit\" } | json | responseStatus_code=\""+code+"\"")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue(), "Find audit logs with responseStatus_code="+code)
}
e2e.Logf("logs with stage=\"RequestReceived\" should not be collected")
log, err := lc.searchLogsInLoki("audit", "{log_type=\"audit\" } | json | stage=\"RequestReceived\"")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
e2e.Logf("log pod changes as RequestResponse level")
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_resource="pods", level="RequestResponse", objectRef_subresource!~".+"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) > 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_resource="pods", level!="RequestResponse", objectRef_subresource!~".+", objectRef_apiGroup!~".+"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
e2e.Logf(`Log "pods/log", "pods/status" as Request level`)
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_resource="pods", level="Request", objectRef_subresource="status"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) > 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_resource="pods", level!="Request", objectRef_subresource="status"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_resource="pods", level="Request", objectRef_subresource="binding"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) > 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_resource="pods", level!="Request", objectRef_subresource="binding"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-config-managed", "cm/merged-trusted-image-registry-ca")
e2e.Logf(`Don't log requests to a configmap called "merged-trusted-image-registry-ca"`)
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_resource="configmaps", objectRef_name="merged-trusted-image-registry-ca"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
e2e.Logf(`Log the request body of configmap changes in "openshift-multus"`)
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | level="Request", objectRef_resource="configmaps", objectRef_namespace="openshift-multus"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) > 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | level!="Request", objectRef_resource="configmaps", objectRef_namespace="openshift-multus"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
e2e.Logf(`Log configmap and secret changes in all other namespaces at the RequestResponse level.`)
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | level="RequestResponse", objectRef_resource="configmaps", objectRef_namespace!="openshift-multus"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) > 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | level!="RequestResponse", objectRef_resource="configmaps", objectRef_namespace!="openshift-multus"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | level="RequestResponse", objectRef_resource="secrets"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) > 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | level!="RequestResponse", objectRef_resource="secrets"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
e2e.Logf(`Don't log watch requests by the "system:serviceaccount:openshift-monitoring:prometheus-k8s" on endpoints, services or pods`)
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | verb="watch", user_username="system:serviceaccount:openshift-monitoring:prometheus-k8s", objectRef_resource="endpoints"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | verb="watch", user_username="system:serviceaccount:openshift-monitoring:prometheus-k8s", objectRef_resource="services"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
//log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | verb="watch", user_username="system:serviceaccount:openshift-monitoring:prometheus-k8s", objectRef_resource="pods"`)
//o.Expect(err).NotTo(o.HaveOccurred())
//o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
e2e.Logf(`Don't log authenticated requests to certain non-resource URL paths.`)
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | requestURI="/metrics"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
e2e.Logf(`Log all other resources in core, operators.coreos.com and rbac.authorization.k8s.io at the Request level.`)
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_apiGroup="operators.coreos.com", level="Request"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) > 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_apiGroup="operators.coreos.com", level!="Request"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_apiGroup="rbac.authorization.k8s.io", level="Request"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) > 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_apiGroup="rbac.authorization.k8s.io", level!="Request"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_apiGroup="", level="Request", objectRef_resource!="secrets", objectRef_resource!="configmaps", objectRef_resource!="pods", stage=~".+"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) > 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_apiGroup="", level!="Request", objectRef_resource!="secrets", objectRef_resource!="configmaps", objectRef_resource!="pods", stage=~".+"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
e2e.Logf(`A catch-all rule to log all other requests at the Metadata level.`)
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | level="Metadata"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) > 0).Should(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
2c11470a-8ac3-4b3d-8cab-67ae3aa69f2d
|
CPaasrunOnly-Author:qitang-High-67421-Separate policies can be applied on separate pipelines.[Serial]
|
['"path/filepath"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
|
g.It("CPaasrunOnly-Author:qitang-High-67421-Separate policies can be applied on separate pipelines.[Serial]", func() {
exutil.By("Deploying an external log store")
es := externalES{
namespace: oc.Namespace(),
loggingNS: loggingNS,
version: "8",
serverName: "external-es",
httpSSL: false,
}
defer es.remove(oc)
es.deploy(oc)
exutil.By("Deploying LokiStack")
ls := lokiStack{
name: "loki-67421",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-67421",
storageClass: sc,
bucketName: "logging-loki-67421-" + getInfrastructureName(oc),
template: filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml"),
}
defer ls.removeObjectStorage(oc)
err := ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-67421",
namespace: loggingNS,
serviceAccountName: "logcollector-67421",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "67421.yaml"),
secretName: "lokistack-secret-67421",
collectAuditLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace, "ES_VERSION="+es.version, "ES_URL=http://"+es.serverName+"."+es.namespace+".svc:9200")
// sleep 3 minutes for logs to be collected
time.Sleep(3 * time.Minute)
es.waitForIndexAppear(oc, "audit")
exutil.By("check data in logs stores")
count, err := es.getDocCount(oc, "audit", `{"query": {"term": {"stage": "RequestReceived"}}}`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(count == 0).Should(o.BeTrue())
count, err = es.getDocCount(oc, "audit", `{"query": {"bool": {"must": [{"term": {"objectRef.resource": "pods"}},{"match": {"level": "RequestResponse"}}]}}}`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(count > 0).Should(o.BeTrue())
count, err = es.getDocCount(oc, "audit", `{"query": {"bool": {"must": [{"term": {"objectRef.resource": "pods"}}, {"terms": {"objectRef.subresource": ["status", "binding"]}}, {"match": {"level": "Request"}}]}}}`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(count == 0).Should(o.BeTrue())
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
lc.waitForLogsAppearByKey("audit", "log_type", "audit")
log, err := lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_resource="pods", level="Request", objectRef_subresource!~".+"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_resource="pods", level!="Request", objectRef_subresource!~".+"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) > 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_resource="pods", level="Request", objectRef_subresource="status"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) > 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_resource="pods", level!="Request", objectRef_subresource="status"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_resource="pods", level="Request", objectRef_subresource="binding"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) > 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_resource="pods", level!="Request", objectRef_subresource="binding"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
1d333f6f-6c74-40cb-a9d5-51c5052735ff
|
CPaasrunOnly-Author:qitang-Medium-68318-Multiple policies can be applied to one pipeline.[Serial]
|
['"path/filepath"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
|
g.It("CPaasrunOnly-Author:qitang-Medium-68318-Multiple policies can be applied to one pipeline.[Serial]", func() {
exutil.By("Deploying LokiStack")
ls := lokiStack{
name: "loki-68318",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-68318",
storageClass: sc,
bucketName: "logging-loki-68318-" + getInfrastructureName(oc),
template: filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml"),
}
defer ls.removeObjectStorage(oc)
err := ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-68318",
namespace: loggingNS,
serviceAccountName: "logcollector-68318",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "68318.yaml"),
secretName: "lokistack-secret-68318",
collectAuditLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
// sleep 3 minutes for logs to be collected
time.Sleep(3 * time.Minute)
exutil.By("generate some audit logs")
pod, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", cloNS, "-l", "name=cluster-logging-operator", "-ojsonpath={.items[0].metadata.name}").Output()
oc.AsAdmin().NotShowInfo().WithoutNamespace().Run("logs").Args("-n", cloNS, pod).Execute()
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
lc.waitForLogsAppearByKey("audit", "log_type", "audit")
e2e.Logf("logs with stage=\"RequestReceived\" should not be collected")
log, err := lc.searchLogsInLoki("audit", "{log_type=\"audit\" } | json | stage=\"RequestReceived\"")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
e2e.Logf("log pod changes as Request level")
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_resource="pods", level="Request", objectRef_subresource!~".+"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) > 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_resource="pods", level!="Request", objectRef_subresource!~".+"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
e2e.Logf(`Log secret changes in all namespaces at the Request level.`)
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | level="Request", objectRef_resource="secrets"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) > 0).Should(o.BeTrue())
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | level!="Request", objectRef_resource="secrets"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue())
exutil.By("Update the order of filters in filterRefs")
clf.update(oc, "", `[{"op": "replace", "path": "/spec/pipelines/0/filterRefs", "value": ["my-policy-1", "my-policy-0"]}]`, "--type=json")
clf.waitForCollectorPodsReady(oc)
// sleep 3 minutes for logs to be collected
time.Sleep(3 * time.Minute)
e2e.Logf("log pod changes as RequestResponse level")
log, err = lc.searchLogsInLoki("audit", `{log_type="audit"} | json | objectRef_resource="pods", level="RequestResponse", objectRef_subresource!~".+"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) > 0).Should(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
f296cbad-0fe8-4694-b195-bc43b075ec08
|
CPaasrunOnly-Author:kbharti-Critical-67565-High-55388-Verify that non-admin/regular user can access logs and query rules as per rolebindings assigned to the user[Serial][Slow]
|
['"path/filepath"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
|
g.It("CPaasrunOnly-Author:kbharti-Critical-67565-High-55388-Verify that non-admin/regular user can access logs and query rules as per rolebindings assigned to the user[Serial][Slow]", func() {
var (
loglabeltemplate = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
exutil.By("deploy loki stack")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "lokistack-67565",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-67565",
storageClass: sc,
bucketName: "logging-loki-67565-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err := ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-67565",
namespace: loggingNS,
serviceAccountName: "logcollector-67565",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-67565",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
enableMonitoring: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
// Creating cluster roles to allow read access from LokiStack
defer deleteLokiClusterRolesForReadAccess(oc)
createLokiClusterRolesForReadAccess(oc)
g.By("Create app project with non-admin/regular user")
oc.SetupProject()
userName := oc.Username()
appProj := oc.Namespace()
bearerToken, err := oc.Run("whoami").Args("-t").Output()
o.Expect(err).NotTo(o.HaveOccurred())
oc.AsAdmin().WithoutNamespace().Run("label").Args("namespace", appProj, "openshift.io/cluster-monitoring=true").Execute()
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create Loki Alerting rule")
appAlertingTemplate := filepath.Join(loggingBaseDir, "loki-log-alerts", "loki-app-alerting-rule-template.yaml")
params := []string{"-f", appAlertingTemplate, "-p", "NAMESPACE=" + appProj}
err = oc.Run("create").Args("-f", exutil.ProcessTemplate(oc, params...), "-n", appProj).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
g.By("Validate that user cannot access logs and rules of owned namespace without RBAC - 403 Auth exception")
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
_, err = lc.searchByNamespace("application", appProj)
o.Expect(err).To(o.HaveOccurred())
_, err = lc.queryRules("application", appProj)
o.Expect(err).To(o.HaveOccurred())
g.By("Create Role-binding to access logs and rules of owned project")
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-role-to-user", "cluster-logging-application-view", userName, "-n", appProj).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Validate user can access logs and rules of owned namespace after RBAC is created - Success flow")
lc.waitForLogsAppearByProject("application", appProj)
appRules, err := lc.queryRules("application", appProj)
o.Expect(err).NotTo(o.HaveOccurred())
matchDataInResponse := []string{"name: MyAppLogVolumeAlert", "alert: MyAppLogVolumeIsHigh", "tenantId: application"}
for _, matchedData := range matchDataInResponse {
if !strings.Contains(string(appRules), matchedData) {
e2e.Failf("Response is missing %s", matchedData)
}
}
e2e.Logf("Rules API response validated succesfully")
})
| |||||
test case
|
openshift/openshift-tests-private
|
c11d3096-4217-4f28-aee4-29b71ca431aa
|
CPaasrunOnly-Author:kbharti-Critical-67643-Verify logs access for LokiStack adminGroups[Serial][Slow]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
|
g.It("CPaasrunOnly-Author:kbharti-Critical-67643-Verify logs access for LokiStack adminGroups[Serial][Slow]", func() {
g.By("Create Groups with users")
oc.SetupProject()
user1 := oc.Username()
user1Token, err := oc.Run("whoami").Args("-t").Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("groups", "new", "infra-admin-group-67643").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().Run("delete").Args("group", "infra-admin-group-67643").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("groups", "add-users", "infra-admin-group-67643", user1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
user2 := oc.Username()
user2Token, err := oc.Run("whoami").Args("-t").Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("groups", "new", "audit-admin-group-67643").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().Run("delete").Args("group", "audit-admin-group-67643").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("groups", "add-users", "audit-admin-group-67643", user2).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploying LokiStack with adminGroups")
exutil.By("deploy loki stack")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "lokistack-67643",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-67643",
storageClass: sc,
bucketName: "logging-loki-67643-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc, "-p", "ADMIN_GROUPS=[\"audit-admin-group-67643\",\"infra-admin-group-67643\"]")
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-67643",
namespace: loggingNS,
serviceAccountName: "logcollector-67643",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-67643",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
// Creating cluster roles to allow read access from LokiStack
defer deleteLokiClusterRolesForReadAccess(oc)
createLokiClusterRolesForReadAccess(oc)
g.By("Create RBAC for groups to access infra/audit logs")
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-group", "cluster-logging-infrastructure-view", "infra-admin-group-67643").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-group", "cluster-logging-infrastructure-view", "infra-admin-group-67643").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-group", "cluster-logging-audit-view", "audit-admin-group-67643").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-group", "cluster-logging-audit-view", "audit-admin-group-67643").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Check Logs Access with users from AdminGroups")
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(user1Token).retry(5)
lc.waitForLogsAppearByKey("infrastructure", "log_type", "infrastructure")
lc = newLokiClient(route).withToken(user2Token).retry(5)
lc.waitForLogsAppearByKey("audit", "log_type", "audit")
})
| |||||
test case
|
openshift/openshift-tests-private
|
befbdab8-45d7-4bc2-b989-9e84bf50eee2
|
CPaasrunOnly-Author:kbharti-High-70683-Medium-70684-Validate new Loki installations support TSDBv3 and v13 storage schema and automatic stream sharding[Serial]
|
['"os"', '"os/exec"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
|
g.It("CPaasrunOnly-Author:kbharti-High-70683-Medium-70684-Validate new Loki installations support TSDBv3 and v13 storage schema and automatic stream sharding[Serial]", func() {
g.By("Deploy Loki stack with v13 schema and tsdb store")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "lokistack-70683",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-70683",
storageClass: sc,
bucketName: "logging-loki-70683-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err := ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-70683",
namespace: loggingNS,
serviceAccountName: "logcollector-70683",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-70683",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
g.By("Extracting Loki config ...")
dirname := "/tmp/" + oc.Namespace() + "-loki-otel-support"
defer os.RemoveAll(dirname)
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("cm/"+ls.name+"-config", "-n", ls.namespace, "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Validate Loki is using v13 schema in config")
lokiStackConf, err := os.ReadFile(dirname + "/config.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
storageSchemaConfig := StorageSchemaConfig{}
err = yaml.Unmarshal(lokiStackConf, &storageSchemaConfig)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(storageSchemaConfig.SchemaConfig.Configs[0].Schema).Should(o.Equal("v13"))
o.Expect(storageSchemaConfig.SchemaConfig.Configs[0].Store).Should(o.Equal("tsdb"))
g.By("Validate Automatic stream sharding")
lokiLimitsConfig := LokiLimitsConfig{}
err = yaml.Unmarshal(lokiStackConf, &lokiLimitsConfig)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(lokiLimitsConfig.LimitsConfig.ShardStreams.Enabled).Should(o.Equal(true))
o.Expect(lokiLimitsConfig.LimitsConfig.ShardStreams.DesiredRate).Should(o.Equal("3MB"))
o.Expect(lokiLimitsConfig.LimitsConfig.AllowStructuredMetadata).Should(o.Equal(true))
g.By("Check exposed metrics for Loki Stream Sharding")
defer removeClusterRoleFromServiceAccount(oc, cloNS, "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, cloNS, "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", cloNS)
for _, metric := range []string{"loki_rate_store_refresh_failures_total", "loki_rate_store_streams", "loki_rate_store_max_stream_shards", "loki_rate_store_max_stream_rate_bytes", "loki_rate_store_max_unique_stream_rate_bytes", "loki_stream_sharding_count"} {
e2e.Logf("Checking metric: %s", metric)
checkMetric(oc, bearerToken, metric, 3)
}
g.By("Override default value for desired stream sharding rate on tenants")
patchConfig := `
spec:
limits:
global:
ingestion:
perStreamDesiredRate: 4
tenants:
application:
ingestion:
perStreamDesiredRate: 5
audit:
ingestion:
perStreamDesiredRate: 6
`
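// perStreamDesiredRate is specified in MB/s; the operator should render it as "4MB" globally and as "5MB"/"6MB" for the application/audit tenant overrides, as validated below.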
_, err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("lokistack", ls.name, "-n", ls.namespace, "--type", "merge", "-p", patchConfig).Output()
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
_, err = exec.Command("bash", "-c", "rm -rf "+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("cm/"+ls.name+"-config", "-n", ls.namespace, "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
//Validating config.yaml below
lokiStackConf, err = os.ReadFile(dirname + "/config.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
err = yaml.Unmarshal(lokiStackConf, &lokiLimitsConfig)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(lokiLimitsConfig.LimitsConfig.ShardStreams.Enabled).Should(o.Equal(true))
o.Expect(lokiLimitsConfig.LimitsConfig.ShardStreams.DesiredRate).Should(o.Equal("4MB"))
//Validating runtime-config.yaml below
overridesConfig, err := os.ReadFile(dirname + "/runtime-config.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
runtimeConfig := RuntimeConfig{}
err = yaml.Unmarshal(overridesConfig, &runtimeConfig)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(runtimeConfig.Overrides.Application.ShardStreams.DesiredRate).Should(o.Equal("5MB"))
o.Expect(runtimeConfig.Overrides.Audit.ShardStreams.DesiredRate).Should(o.Equal("6MB"))
e2e.Logf("Overrides validated successfully!")
})
| |||||
test case
|
openshift/openshift-tests-private
|
1a38c688-ba7d-4e3f-9003-fad95cde8d1c
|
CPaasrunOnly-Author:kbharti-High-70714-Show warning to user for upgrading to TSDBv3 store and v13 schema[Serial]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
|
g.It("CPaasrunOnly-Author:kbharti-High-70714-Show warning to user for upgrading to TSDBv3 store and v13 schema[Serial]", func() {
// The alert will only be shown on a t-shirt size of 1x.extra-small or greater
if !validateInfraAndResourcesForLoki(oc, "35Gi", "16") {
g.Skip("Current platform not supported/resources not available for this test!")
}
g.By("Deploy Loki stack with v12 schema and bolt-db shipper")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "lokistack-70714",
namespace: loggingNS,
tSize: "1x.extra-small",
storageType: s,
storageSecret: "storage-secret-70714",
storageClass: sc,
bucketName: "logging-loki-70714-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err := ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc, "-p", "STORAGE_SCHEMA_VERSION=v12")
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
defer removeClusterRoleFromServiceAccount(oc, ls.namespace, "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, ls.namespace, "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
token := getSAToken(oc, "default", ls.namespace)
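// Deploying with the v12 schema should make the LokistackSchemaUpgradesRequired warning alert fire.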
queryAlertManagerForActiveAlerts(oc, token, false, "LokistackSchemaUpgradesRequired", 5)
e2e.Logf("Alert LokistackSchemaUpgradesRequired is firing...")
})
| |||||
test case
|
openshift/openshift-tests-private
|
f0e69aef-53f9-4a4f-8b87-a2c404526d51
|
Author:kbharti-CPaasrunOnly-Medium-70685-Validate support for blocking queries on Loki[Serial]
|
['"fmt"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
|
g.It("Author:kbharti-CPaasrunOnly-Medium-70685-Validate support for blocking queries on Loki[Serial]", func() {
g.By("Create 3 application generator projects")
oc.SetupProject()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
appProj1 := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj1, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
appProj2 := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj2, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
appProj3 := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj3, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploy Loki stack")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "lokistack-70685",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-70685",
storageClass: sc,
bucketName: "logging-loki-70685-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
// patch spec to block specific queries
patchConfig := `
spec:
limits:
tenants:
application:
queries:
blocked:
- pattern: '{kubernetes_namespace_name="%s"}'
- pattern: '.*%s.*'
regex: true
`
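// The first pattern blocks the exact selector {kubernetes_namespace_name="<appProj1>"}; the second is a regex that blocks any query mentioning <appProj2>.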
_, err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("lokistack", ls.name, "-n", ls.namespace, "--type", "merge", "-p", fmt.Sprintf(patchConfig, appProj1, appProj2)).Output()
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-70685",
namespace: loggingNS,
serviceAccountName: "logcollector-70685",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-70685",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
defer removeClusterRoleFromServiceAccount(oc, ls.namespace, "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, ls.namespace, "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", ls.namespace)
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
g.By("Validate queries are blocked as per the spec config")
_, err = lc.searchByNamespace("application", appProj1)
// Cannot query {kubernetes_namespace_name="appProj1"} since this query is blocked by policy
o.Expect(err).To(o.HaveOccurred())
_, err = lc.searchByNamespace("application", appProj2)
// Any query containing appProj2 would be blocked by policy (regex)
o.Expect(err).To(o.HaveOccurred())
// Success since no blocking policy exists on appProj3
lc.waitForLogsAppearByProject("application", appProj3)
})
| |||||
test case
|
openshift/openshift-tests-private
|
0e9a0380-d898-45eb-832f-e9c0eb36b0e8
|
Author:kbharti-CPaasrunOnly-Critical-75334-Forward logs to lokiStack via clusterLogForwarder.observability.openshift.io API using per tenant and global labelKeys[Serial]
|
['"context"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
|
g.It("Author:kbharti-CPaasrunOnly-Critical-75334-Forward logs to lokiStack via clusterLogForwarder.observability.openshift.io API using per tenant and global labelKeys[Serial]", func() {
var (
loglabeltemplate = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
exutil.By("Create an application")
oc.SetupProject()
user1 := oc.Username()
appProj := oc.Namespace()
userToken, err := oc.Run("whoami").Args("-t").Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create a group and role bindings to access loki logs")
defer oc.AsAdmin().Run("delete").Args("group", "admin-group-75334").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("groups", "new", "admin-group-75334").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("groups", "add-users", "admin-group-75334", user1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer deleteLokiClusterRolesForReadAccess(oc)
createLokiClusterRolesForReadAccess(oc)
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-group", "cluster-logging-infrastructure-view", "admin-group-75334").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-group", "cluster-logging-infrastructure-view", "admin-group-75334").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-group", "cluster-logging-audit-view", "admin-group-75334").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-group", "cluster-logging-audit-view", "admin-group-75334").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-group", "cluster-logging-application-view", "admin-group-75334").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-group", "cluster-logging-application-view", "admin-group-75334").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Deploying LokiStack with adminGroup")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "lokistack-75334",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-75334",
storageClass: sc,
bucketName: "logging-loki-75334-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc, "-p", "ADMIN_GROUPS=[\"admin-group-75334\"]")
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("create a CLF to test forward to Lokistack")
clf := clusterlogforwarder{
name: "instance-75334",
namespace: loggingNS,
serviceAccountName: "logcollector-75334",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack-with-labelkeys.yaml"),
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace, "APP_LABELKEYS=[\"kubernetes.container_name\"]", "IGNORE_GLOBAL_INFRA=true", "INFRA_LABELKEYS=[\"kubernetes.namespace_name\"]", "GLOBAL_LABELKEYS=[\"log_type\"]")
exutil.By("Check that logs are forwarded to LokiStack")
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(userToken).retry(5)
lc.waitForLogsAppearByKey("audit", "log_type ", "audit")
lc.waitForLogsAppearByKey("infrastructure", "kubernetes_namespace_name", "openshift-monitoring")
lc.waitForLogsAppearByKey("application", "log_type", "application")
// Get some pod and container names under extracted infra logs
logs, err := lc.searchByKey("infrastructure", "kubernetes_namespace_name", "openshift-monitoring")
o.Expect(err).NotTo(o.HaveOccurred())
extractedLogs := extractLogEntities(logs)
var infraLogPodNames []string
var infraLogContainerNames []string
for _, log := range extractedLogs {
infraLogPodNames = append(infraLogPodNames, log.Kubernetes.PodName)
infraLogContainerNames = append(infraLogContainerNames, log.Kubernetes.ContainerName)
}
exutil.By("Validating application logs with labelKeys")
// Since global labelkeys is defined as 'log_type' and application labelkeys is defined as 'kubernetes.container_name' with ignoreGlobal as 'false',
// application tenant can be queried with 'log_type' and 'kubernetes_container_name' keys only.
// Query with key 'kubernetes_namespace_name' - should yield an empty response
logs, err = lc.searchByKey("application", "kubernetes_namespace_name", appProj)
o.Expect(err).NotTo(o.HaveOccurred())
extractedLogs = extractLogEntities(logs)
o.Expect(len(extractedLogs) == 0).Should(o.BeTrue())
// Query with key 'kubernetes_pod_name' - should yield an empty response
podList, err := oc.AdminKubeClient().CoreV1().Pods(appProj).List(context.Background(), metav1.ListOptions{LabelSelector: "run=centos-logtest"})
o.Expect(err).NotTo(o.HaveOccurred())
logs, err = lc.searchByKey("application", "kubernetes_pod_name", podList.Items[0].Name)
o.Expect(err).NotTo(o.HaveOccurred())
extractedLogs = extractLogEntities(logs)
o.Expect(len(extractedLogs) == 0).Should(o.BeTrue())
// Query with key 'kubernetes_container_name' - should yield a NON empty response
logs, err = lc.searchByKey("application", "kubernetes_container_name", "logging-centos-logtest")
o.Expect(err).NotTo(o.HaveOccurred())
extractedLogs = extractLogEntities(logs)
o.Expect(len(extractedLogs) != 0).Should(o.BeTrue())
e2e.Logf("Validation with application labelKeys is success")
exutil.By("Validating infrastructure log streams with labelKeys")
// Since global labelkeys is defined as 'log_type' BUT infrastructure labelkeys is defined as 'kubernetes.namespace_name' with ignoreGlobal as 'true',
// Infrastructure tenant can be queried with 'kubernetes_namespace_name' key only.
// Query with key 'log_type' - should yield an empty response
logs, err = lc.searchByKey("infrastructure", "log_type", "infrastructure")
o.Expect(err).NotTo(o.HaveOccurred())
extractedLogs = extractLogEntities(logs)
o.Expect(len(extractedLogs) == 0).Should(o.BeTrue())
// Query with key 'kubernetes_pod_name' - should yield an empty response
for _, pod := range infraLogPodNames {
logs, err = lc.searchByKey("infrastructure", "kubernetes_pod_name", pod)
o.Expect(err).NotTo(o.HaveOccurred())
extractedLogs = extractLogEntities(logs)
o.Expect(len(extractedLogs) == 0).Should(o.BeTrue())
}
// Query with key 'kubernetes_container_name' - should yield an empty response
for _, container := range infraLogContainerNames {
logs, err := lc.searchByKey("infrastructure", "kubernetes_container_name", container)
o.Expect(err).NotTo(o.HaveOccurred())
extractedLogs = extractLogEntities(logs)
o.Expect(len(extractedLogs) == 0).Should(o.BeTrue())
}
e2e.Logf("Validation with infrastructure labelKeys is success")
})
| |||||
test case
|
openshift/openshift-tests-private
|
4457dc49-19ee-4a89-bbab-a97b17634f64
|
Author:kbharti-CPaasrunOnly-High-75369-Forward logs to lokiStack via ClusterLogForwarder.observability.openshift.io API using per tenant keys and no global overrides[Serial]
|
['"context"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
|
g.It("Author:kbharti-CPaasrunOnly-High-75369-Forward logs to lokiStack via ClusterLogForwarder.observability.openshift.io API using per tenant keys and no global overrides[Serial]", func() {
var (
loglabeltemplate = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
exutil.By("Create an application")
oc.SetupProject()
user1 := oc.Username()
appProj := oc.Namespace()
userToken, err := oc.Run("whoami").Args("-t").Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create a group and role bindings to access loki logs")
defer oc.AsAdmin().Run("delete").Args("group", "admin-group-75369").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("groups", "new", "admin-group-75369").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("groups", "add-users", "admin-group-75369", user1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer deleteLokiClusterRolesForReadAccess(oc)
createLokiClusterRolesForReadAccess(oc)
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-group", "cluster-logging-infrastructure-view", "admin-group-75369").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-group", "cluster-logging-infrastructure-view", "admin-group-75369").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-group", "cluster-logging-audit-view", "admin-group-75369").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-group", "cluster-logging-audit-view", "admin-group-75369").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-group", "cluster-logging-application-view", "admin-group-75369").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-group", "cluster-logging-application-view", "admin-group-75369").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Deploying LokiStack with adminGroup")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "lokistack-75369",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-75369",
storageClass: sc,
bucketName: "logging-loki-75369-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc, "-p", "ADMIN_GROUPS=[\"admin-group-75369\"]")
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("create a CLF to test forward to Lokistack")
clf := clusterlogforwarder{
name: "instance-75369",
namespace: loggingNS,
serviceAccountName: "logcollector-75369",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack-with-labelkeys.yaml"),
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace, "APP_LABELKEYS=[\"kubernetes.labels.test\"]", "IGNORE_GLOBAL_INFRA=true", "INFRA_LABELKEYS=[\"kubernetes.namespace_name\"]", "GLOBAL_LABELKEYS=[]")
exutil.By("Check that logs are forwarded to LokiStack")
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(userToken).retry(5)
lc.waitForLogsAppearByKey("audit", "log_type", "audit")
lc.waitForLogsAppearByKey("application", "log_type", "application")
lc.waitForLogsAppearByKey("infrastructure", "kubernetes_namespace_name", "openshift-monitoring")
// Get some pod and container names under extracted infra logs
logs, err := lc.searchByKey("infrastructure", "kubernetes_namespace_name", "openshift-monitoring")
o.Expect(err).NotTo(o.HaveOccurred())
extractedLogs := extractLogEntities(logs)
var infraLogPodNames []string
var infraLogContainerNames []string
for _, log := range extractedLogs {
infraLogPodNames = append(infraLogPodNames, log.Kubernetes.PodName)
infraLogContainerNames = append(infraLogContainerNames, log.Kubernetes.ContainerName)
}
exutil.By("Validating application logs with labelKeys")
// Since the global labelKeys are undefined/not overridden and the application labelKeys are defined as 'kubernetes.labels.test' with ignoreGlobal set to 'false',
// the application tenant can be queried with the default labelKeys as well as the 'kubernetes.labels.test' key.
// Query with key 'kubernetes_namespace_name' - should yield a NON empty response
logs, err = lc.searchByKey("application", "kubernetes_namespace_name", appProj)
o.Expect(err).NotTo(o.HaveOccurred())
extractedLogs = extractLogEntities(logs)
o.Expect(len(extractedLogs) != 0).Should(o.BeTrue())
// Query with key 'kubernetes_pod_name' - should yield a NON empty response
podList, err := oc.AdminKubeClient().CoreV1().Pods(appProj).List(context.Background(), metav1.ListOptions{LabelSelector: "run=centos-logtest"})
o.Expect(err).NotTo(o.HaveOccurred())
logs, err = lc.searchByKey("application", "kubernetes_pod_name", podList.Items[0].Name)
o.Expect(err).NotTo(o.HaveOccurred())
extractedLogs = extractLogEntities(logs)
o.Expect(len(extractedLogs) != 0).Should(o.BeTrue())
// Query with key 'kubernetes_container_name' - should yield a NON empty response
logs, err = lc.searchByKey("application", "kubernetes_container_name", "logging-centos-logtest")
o.Expect(err).NotTo(o.HaveOccurred())
extractedLogs = extractLogEntities(logs)
o.Expect(len(extractedLogs) != 0).Should(o.BeTrue())
// Query with key 'kubernetes.labels.test' - should yield a NON empty response
logs, err = lc.searchByKey("application", "kubernetes_labels_test", "centos-logtest")
o.Expect(err).NotTo(o.HaveOccurred())
extractedLogs = extractLogEntities(logs)
o.Expect(len(extractedLogs) != 0).Should(o.BeTrue())
e2e.Logf("Validation with application labelKeys is success")
exutil.By("Validating infrastructure log streams with labelKeys")
// Since the global labelKeys are undefined/not overridden BUT the infrastructure labelKeys are defined as 'kubernetes.namespace_name' with ignoreGlobal set to 'true',
// the infrastructure tenant can be queried with the 'kubernetes_namespace_name' key only.
// Query with key 'log_type' - should yield an empty response
logs, err = lc.searchByKey("infrastructure", "log_type", "infrastructure")
o.Expect(err).NotTo(o.HaveOccurred())
extractedLogs = extractLogEntities(logs)
o.Expect(len(extractedLogs) == 0).Should(o.BeTrue())
// Query with key 'kubernetes_pod_name' - should yield an empty response
for _, pod := range infraLogPodNames {
logs, err = lc.searchByKey("infrastructure", "kubernetes_pod_name", pod)
o.Expect(err).NotTo(o.HaveOccurred())
extractedLogs = extractLogEntities(logs)
o.Expect(len(extractedLogs) == 0).Should(o.BeTrue())
}
// Query with key 'kubernetes_container_name' - should yield an empty response
for _, container := range infraLogContainerNames {
logs, err := lc.searchByKey("infrastructure", "kubernetes_container_name", container)
o.Expect(err).NotTo(o.HaveOccurred())
extractedLogs = extractLogEntities(logs)
o.Expect(len(extractedLogs) == 0).Should(o.BeTrue())
}
e2e.Logf("Validation with infrastructure labelKeys is success")
})
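For reference, a minimal sketch of the kind of label-based query that the Loki client helper issues above. The gateway URL, tenant path prefix (/api/logs/v1/<tenant>/loki/api/v1/query_range) and bearer token shown here are assumptions based on the public Loki HTTP API and a typical LokiStack gateway layout, not code taken from this repository's Loki client helper.
// Illustrative sketch only: assumes the LokiStack gateway exposes the standard Loki
// query API under /api/logs/v1/<tenant>/loki/api/v1/query_range.
package main
import (
"fmt"
"io"
"net/http"
"net/url"
"time"
)
func main() {
gateway := "https://lokistack-75369-gateway-http.example:8080" // hypothetical route/service address
tenant := "application"
token := "sha256~example-bearer-token" // hypothetical token, e.g. from `oc whoami -t`
// Stream selector equivalent to searching by the custom label key used in the test above.
query := `{kubernetes_labels_test="centos-logtest"}`
params := url.Values{}
params.Set("query", query)
params.Set("limit", "10")
params.Set("start", fmt.Sprintf("%d", time.Now().Add(-30*time.Minute).UnixNano()))
params.Set("end", fmt.Sprintf("%d", time.Now().UnixNano()))
endpoint := fmt.Sprintf("%s/api/logs/v1/%s/loki/api/v1/query_range?%s", gateway, tenant, params.Encode())
req, err := http.NewRequest(http.MethodGet, endpoint, nil)
if err != nil {
panic(err)
}
req.Header.Set("Authorization", "Bearer "+token)
resp, err := http.DefaultClient.Do(req)
if err != nil {
panic(err)
}
defer resp.Body.Close()
body, _ := io.ReadAll(resp.Body)
fmt.Println(resp.Status)
fmt.Println(string(body))
}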
| |||||
test case
|
openshift/openshift-tests-private
|
fbc28676-1e00-4dcd-bb65-d622a601179c
|
Author:kbharti-CPaasrunOnly-Critical-76990-Verify that LokiStack provides a default set of otlp configuration[Serial]
|
['"os"', '"path/filepath"', '"reflect"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
|
g.It("Author:kbharti-CPaasrunOnly-Critical-76990-Verify that LokiStack provides a default set of otlp configuration[Serial]", func() {
var (
loglabeltemplate = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
exutil.By("Create an application")
oc.SetupProject()
user1 := oc.Username()
appProj := oc.Namespace()
userToken, err := oc.Run("whoami").Args("-t").Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create a group and role bindings to access loki logs")
defer oc.AsAdmin().Run("delete").Args("group", "admin-group-76990").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("groups", "new", "admin-group-76990").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("groups", "add-users", "admin-group-76990", user1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer deleteLokiClusterRolesForReadAccess(oc)
createLokiClusterRolesForReadAccess(oc)
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-group", "cluster-logging-infrastructure-view", "admin-group-76990").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-group", "cluster-logging-infrastructure-view", "admin-group-76990").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-group", "cluster-logging-audit-view", "admin-group-76990").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-group", "cluster-logging-audit-view", "admin-group-76990").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-group", "cluster-logging-application-view", "admin-group-76990").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-group", "cluster-logging-application-view", "admin-group-76990").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Deploying LokiStack with adminGroup")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "lokistack-76990",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-76990",
storageClass: sc,
bucketName: "logging-loki-76990-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc, "ADMIN_GROUPS=[\"admin-group-76990\"]")
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("create a CLF to test forward to Lokistack")
clf := clusterlogforwarder{
name: "instance-76990",
namespace: loggingNS,
serviceAccountName: "logcollector-76990",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-76990",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace, "DATAMODEL=Otel", `TUNING={"compression": "none"}`)
exutil.By("Extracting Loki config ...")
dirname := "/tmp/" + oc.Namespace() + "-lokistack-otlp-support"
defer os.RemoveAll(dirname)
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("cm/"+ls.name+"-config", "-n", ls.namespace, "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Validate the default OTLP configuration under lokiStack config")
lokiStackConf, err := os.ReadFile(dirname + "/config.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
lokiLimitsConfig := LokiLimitsConfig{}
err = yaml.Unmarshal(lokiStackConf, &lokiLimitsConfig)
o.Expect(err).NotTo(o.HaveOccurred())
// Default expected OTLP configuration under limits_config
defaultOTLPConfig := `
resource_attributes:
attributes_config:
- action: index_label
attributes:
- k8s.container.name
- k8s.cronjob.name
- k8s.daemonset.name
- k8s.deployment.name
- k8s.job.name
- k8s.namespace.name
- k8s.node.name
- k8s.pod.name
- k8s.statefulset.name
- kubernetes.container_name
- kubernetes.host
- kubernetes.namespace_name
- kubernetes.pod_name
- log_source
- log_type
- openshift.cluster.uid
- openshift.log.source
- openshift.log.type
- service.name
- action: structured_metadata
attributes:
- k8s.node.uid
- k8s.pod.uid
- k8s.replicaset.name
- process.command_line
- process.executable.name
- process.executable.path
- process.pid
- action: structured_metadata
regex: k8s\.pod\.labels\..+
- action: structured_metadata
regex: openshift\.labels\..+
log_attributes:
- action: structured_metadata
attributes:
- k8s.event.level
- k8s.event.object_ref.api.group
- k8s.event.object_ref.api.version
- k8s.event.object_ref.name
- k8s.event.object_ref.resource
- k8s.event.request.uri
- k8s.event.response.code
- k8s.event.stage
- k8s.event.user_agent
- k8s.user.groups
- k8s.user.username
- level
- log.iostream
- action: structured_metadata
regex: k8s\.event\.annotations\..+
- action: structured_metadata
regex: systemd\.t\..+
- action: structured_metadata
regex: systemd\.u\..+`
var staticOtlpConfig OtlpConfig
err = yaml.Unmarshal([]byte(defaultOTLPConfig), &staticOtlpConfig)
o.Expect(err).NotTo(o.HaveOccurred())
if reflect.DeepEqual(lokiLimitsConfig.LimitsConfig.OtlpConfig, staticOtlpConfig) {
e2e.Logf("Validated expected default OTLP configuration under lokistack config")
} else {
e2e.Failf("Incorrect default OTLP configuration found. Failing case..")
}
//check logs in loki stack by querying with OTEL semantic attributes
exutil.By("Check logs are received with OTLP semantic convention attributes in loki")
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(userToken).retry(5)
for _, logType := range []string{"application", "infrastructure", "audit"} {
lc.waitForLogsAppearByKey(logType, "openshift_log_type", logType)
}
lc.waitForLogsAppearByKey("application", "k8s_namespace_name", appProj)
lc.waitForLogsAppearByKey("infrastructure", "k8s_namespace_name", "openshift-monitoring")
lc.waitForLogsAppearByKey("application", "k8s_container_name", "logging-centos-logtest")
exutil.By("Validate log streams are pushed to external storage bucket/container")
ls.validateExternalObjectStorageForLogs(oc, []string{"application", "audit", "infrastructure"})
})
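The DeepEqual check above depends on the repository's LokiLimitsConfig/OtlpConfig types, which are defined elsewhere and not shown in this listing. Below is a self-contained sketch of the same validation idea, assuming gopkg.in/yaml.v3 and hypothetical local structs that only model the fields being compared.
// Minimal sketch; the actual test uses the repository's own OtlpConfig/LokiLimitsConfig
// types and yaml import, which may be shaped differently.
package main
import (
"fmt"
"reflect"
"gopkg.in/yaml.v3"
)
// attributeAction models one entry under attributes_config / log_attributes (hypothetical).
type attributeAction struct {
Action string `yaml:"action"`
Attributes []string `yaml:"attributes,omitempty"`
Regex string `yaml:"regex,omitempty"`
}
// otlpConfig models a trimmed-down otlp section of Loki's limits_config (hypothetical).
type otlpConfig struct {
ResourceAttributes struct {
AttributesConfig []attributeAction `yaml:"attributes_config"`
} `yaml:"resource_attributes"`
LogAttributes []attributeAction `yaml:"log_attributes,omitempty"`
}
func main() {
expected := `
resource_attributes:
  attributes_config:
  - action: index_label
    attributes:
    - k8s.namespace.name
    - log_type
`
actual := expected // in the real test this comes from the extracted config.yaml
var want, got otlpConfig
if err := yaml.Unmarshal([]byte(expected), &want); err != nil {
panic(err)
}
if err := yaml.Unmarshal([]byte(actual), &got); err != nil {
panic(err)
}
if reflect.DeepEqual(want, got) {
fmt.Println("OTLP configuration matches the expected defaults")
} else {
fmt.Println("OTLP configuration drifted from the expected defaults")
}
}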
| |||||
test case
|
openshift/openshift-tests-private
|
26cd3211-f953-40de-8f1d-0b27aff9acae
|
Author:kbharti-CPaasrunOnly-High-77345-Verify that LokiStack provides a custom set of otlp configuration with global and per tenant[Serial]
|
['"fmt"', '"os"', '"path/filepath"', '"reflect"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
|
g.It("Author:kbharti-CPaasrunOnly-High-77345-Verify that LokiStack provides a custom set of otlp configuration with global and per tenant[Serial]", func() {
var (
loglabeltemplate = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
exutil.By("Create an application")
oc.SetupProject()
user1 := oc.Username()
appProj := oc.Namespace()
userToken, err := oc.Run("whoami").Args("-t").Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create a group and role bindings to access loki logs")
defer oc.AsAdmin().Run("delete").Args("group", "admin-group-77345").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("groups", "new", "admin-group-77345").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("groups", "add-users", "admin-group-77345", user1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer deleteLokiClusterRolesForReadAccess(oc)
createLokiClusterRolesForReadAccess(oc)
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-group", "cluster-logging-infrastructure-view", "admin-group-77345").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-group", "cluster-logging-infrastructure-view", "admin-group-77345").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-group", "cluster-logging-audit-view", "admin-group-77345").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-group", "cluster-logging-audit-view", "admin-group-77345").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-group", "cluster-logging-application-view", "admin-group-77345").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-group", "cluster-logging-application-view", "admin-group-77345").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Deploying LokiStack with adminGroup")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "lokistack-77345",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-77345",
storageClass: sc,
bucketName: "logging-loki-77345-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc, "ADMIN_GROUPS=[\"admin-group-77345\"]")
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
// Patch the LokiStack CR with a custom OTLP configuration.
// When disableRecommendedAttributes is 'true', only the required stream labels are enabled.
customOTLPconfig := `{
"spec": {
"limits": {
"tenants": {
"application": {
"otlp": {
"streamLabels": {
"resourceAttributes": [
{ "name": "k8s.pod.name" }
]
},
"structuredMetadata": {
"logAttributes": [
{ "name": "k8s.pod.uid" }
]
}
}
},
"infrastructure": {
"otlp": {
"streamLabels": {
"resourceAttributes": [
{ "name": "k8s.container.name" }
]
},
"structuredMetadata": {
"logAttributes": [
{ "name": "log.iostream" }
]
}
}
}
}
},
"tenants": {
"mode": "openshift-logging",
"openshift": {
"otlp": {
"disableRecommendedAttributes": true
}
}
}
}
}`
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("lokistack", ls.name, "-n", ls.namespace, "--type", "merge", "-p", customOTLPconfig).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("create a CLF to test forward to Lokistack")
clf := clusterlogforwarder{
name: "instance-77345",
namespace: loggingNS,
serviceAccountName: "logcollector-77345",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-77345",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace, "DATAMODEL=Otel", `TUNING={"compression": "none"}`)
exutil.By("Extracting Loki config ...")
dirname := "/tmp/" + oc.Namespace() + "-lokistack-otlp-support"
defer os.RemoveAll(dirname)
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("cm/"+ls.name+"-config", "-n", ls.namespace, "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Validate the default OTLP configuration under lokiStack config")
lokiStackConf, err := os.ReadFile(dirname + "/config.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
lokiLimitsConfig := LokiLimitsConfig{}
err = yaml.Unmarshal(lokiStackConf, &lokiLimitsConfig)
o.Expect(err).NotTo(o.HaveOccurred())
// default OTLP config when disableRecommendedAttributes is enabled.
defaultOTLPConfig := `
resource_attributes:
attributes_config:
- action: index_label
attributes:
- k8s.namespace.name
- kubernetes.namespace_name
- log_source
- log_type
- openshift.cluster.uid
- openshift.log.source
- openshift.log.type`
var staticOtlpConfig OtlpConfig
err = yaml.Unmarshal([]byte(defaultOTLPConfig), &staticOtlpConfig)
o.Expect(err).NotTo(o.HaveOccurred())
if reflect.DeepEqual(lokiLimitsConfig.LimitsConfig.OtlpConfig, staticOtlpConfig) {
e2e.Logf("Validated expected default OTLP configuration under lokistack config")
} else {
e2e.Failf("Incorrect default OTLP configuration found. Failing case..")
}
exutil.By("Validate the per tenant OTLP configuration under lokiStack overrides config")
lokiStackConf, err = os.ReadFile(dirname + "/runtime-config.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
// Application tenant
customOtlpconfigForApp := `
resource_attributes:
attributes_config:
- action: index_label
attributes:
- k8s.namespace.name
- k8s.pod.name
- kubernetes.namespace_name
- log_source
- log_type
- openshift.cluster.uid
- openshift.log.source
- openshift.log.type
log_attributes:
- action: structured_metadata
attributes:
- k8s.pod.uid`
err = yaml.Unmarshal([]byte(customOtlpconfigForApp), &staticOtlpConfig)
o.Expect(err).NotTo(o.HaveOccurred())
runtimeConfig := RuntimeConfig{}
err = yaml.Unmarshal(lokiStackConf, &runtimeConfig)
o.Expect(err).NotTo(o.HaveOccurred())
if reflect.DeepEqual(runtimeConfig.Overrides.Application.OtlpConfig, staticOtlpConfig) {
fmt.Println("Validated expected custom OTLP configuration for tenant: application")
} else {
e2e.Failf("Incorrect custom OTLP configuration found for tenant: application. Failing case..")
}
// Infrastructure tenant
customOtlpconfigForInfra := `
resource_attributes:
attributes_config:
- action: index_label
attributes:
- k8s.container.name
- k8s.namespace.name
- kubernetes.namespace_name
- log_source
- log_type
- openshift.cluster.uid
- openshift.log.source
- openshift.log.type
log_attributes:
- action: structured_metadata
attributes:
- log.iostream`
err = yaml.Unmarshal([]byte(customOtlpconfigForInfra), &staticOtlpConfig)
o.Expect(err).NotTo(o.HaveOccurred())
if reflect.DeepEqual(runtimeConfig.Overrides.Infrastructure.OtlpConfig, staticOtlpConfig) {
fmt.Println("Validated expected custom OTLP configuration for tenant: infrastructure")
} else {
e2e.Failf("Incorrect custom OTLP configuration found for tenant: infrastructure. Failing case..")
}
exutil.By("Check logs are received with OTLP semantic convention attributes in loki")
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(userToken).retry(5)
for _, logType := range []string{"application", "infrastructure", "audit"} {
lc.waitForLogsAppearByKey(logType, "openshift_log_type", logType)
}
lc.waitForLogsAppearByKey("application", "k8s_namespace_name", appProj)
lc.waitForLogsAppearByKey("infrastructure", "k8s_namespace_name", "openshift-monitoring")
// No logs should be found for the application tenant when querying by the k8s_container_name streamLabel/labelKey, since it is not included in the custom overrides config
logs, err := lc.searchByKey("application", "k8s_container_name", "logging-centos-logtest")
o.Expect(err).NotTo(o.HaveOccurred())
extractedLogs := extractLogEntities(logs)
o.Expect(len(extractedLogs) == 0).Should(o.BeTrue())
exutil.By("Validate log streams are pushed to external storage bucket/container")
ls.validateExternalObjectStorageForLogs(oc, []string{"application", "audit", "infrastructure"})
})
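The merge patch above is written as a raw JSON string. As an alternative view of the same structure, here is a sketch that composes an equivalent patch with encoding/json; the nested field names mirror the patch used in the test, while the Go types are local helpers for illustration, not types exported by the Loki Operator.
package main
import (
"encoding/json"
"fmt"
)
// attrName is a local helper matching the {"name": "..."} entries in the patch.
type attrName struct {
Name string `json:"name"`
}
// otlpOverride is a local helper mirroring the per-tenant otlp override shape from the patch.
type otlpOverride struct {
StreamLabels struct {
ResourceAttributes []attrName `json:"resourceAttributes,omitempty"`
} `json:"streamLabels,omitempty"`
StructuredMetadata struct {
LogAttributes []attrName `json:"logAttributes,omitempty"`
} `json:"structuredMetadata,omitempty"`
}
func main() {
app := otlpOverride{}
app.StreamLabels.ResourceAttributes = []attrName{{Name: "k8s.pod.name"}}
app.StructuredMetadata.LogAttributes = []attrName{{Name: "k8s.pod.uid"}}
patch := map[string]any{
"spec": map[string]any{
"limits": map[string]any{
"tenants": map[string]any{
"application": map[string]any{"otlp": app},
},
},
"tenants": map[string]any{
"mode": "openshift-logging",
"openshift": map[string]any{
"otlp": map[string]any{"disableRecommendedAttributes": true},
},
},
},
}
out, err := json.MarshalIndent(patch, "", "  ")
if err != nil {
panic(err)
}
fmt.Println(string(out))
}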
| |||||
test case
|
openshift/openshift-tests-private
|
f4201930-ce4b-48d1-b21a-571cb9c0c0ff
|
CPaasrunOnly-Author:ikanse-High-47760-Vector Forward logs to Loki using default value via HTTP
|
['"context"', '"path/filepath"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
|
g.It("CPaasrunOnly-Author:ikanse-High-47760-Vector Forward logs to Loki using default value via HTTP", func() {
var (
loglabeltemplate = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
g.By("Create project for app logs and deploy the log generator app")
appProj := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create Loki project and deploy Loki Server")
oc.SetupProject()
lokiNS := oc.Namespace()
loki := externalLoki{"loki-server", lokiNS}
defer loki.remove(oc)
loki.deployLoki(oc)
g.By("Create ClusterLogForwarder instance")
clf := clusterlogforwarder{
name: "clf-47760",
namespace: lokiNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "loki.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "URL=http://"+loki.name+"."+lokiNS+".svc:3100")
route := "http://" + getRouteAddress(oc, loki.namespace, loki.name)
lc := newLokiClient(route)
g.By("Searching for Application Logs in Loki")
appPodName, err := oc.AdminKubeClient().CoreV1().Pods(appProj).List(context.Background(), metav1.ListOptions{LabelSelector: "run=centos-logtest"})
o.Expect(err).NotTo(o.HaveOccurred())
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
appLogs, err := lc.searchByNamespace("", appProj)
if err != nil {
return false, err
}
if appLogs.Status == "success" && appLogs.Data.Stats.Summary.BytesProcessedPerSecond != 0 && appLogs.Data.Result[0].Stream.LogType == "application" && appLogs.Data.Result[0].Stream.KubernetesPodName == appPodName.Items[0].Name {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "failed searching for application logs in Loki")
e2e.Logf("Application Logs Query is a success")
g.By("Searching for Audit Logs in Loki")
auditLogs, err := lc.searchLogsInLoki("", "{log_type=\"audit\"}")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(auditLogs.Status).Should(o.Equal("success"))
o.Expect(auditLogs.Data.Result[0].Stream.LogType).Should(o.Equal("audit"))
o.Expect(auditLogs.Data.Stats.Summary.BytesProcessedPerSecond).ShouldNot(o.BeZero())
e2e.Logf("Audit Logs Query is a success")
g.By("Searching for Infra Logs in Loki")
infraLogs, err := lc.searchLogsInLoki("", "{log_type=\"infrastructure\"}")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(infraLogs.Status).Should(o.Equal("success"))
o.Expect(infraLogs.Data.Result[0].Stream.LogType).Should(o.Equal("infrastructure"))
o.Expect(infraLogs.Data.Stats.Summary.BytesProcessedPerSecond).ShouldNot(o.BeZero())
})
| |||||
test case
|
openshift/openshift-tests-private
|
9160b2b5-38ea-4823-b078-21faf3d73fb3
|
CPaasrunOnly-Author:ikanse-Medium-48922-Vector Forward logs to Loki using correct loki.tenantKey.kubernetes.namespace_name via HTTP
|
['"context"', '"path/filepath"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
|
g.It("CPaasrunOnly-Author:ikanse-Medium-48922-Vector Forward logs to Loki using correct loki.tenantKey.kubernetes.namespace_name via HTTP", func() {
var (
loglabeltemplate = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
g.By("Create project for app logs and deploy the log generator app")
appProj := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create Loki project and deploy Loki Server")
oc.SetupProject()
lokiNS := oc.Namespace()
loki := externalLoki{"loki-server", lokiNS}
defer loki.remove(oc)
loki.deployLoki(oc)
tenantKey := "kubernetes_namespace_name"
g.By("Create ClusterLogForwarder")
clf := clusterlogforwarder{
name: "clf-48922",
namespace: lokiNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "loki.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "TENANT_KEY={.kubernetes.namespace_name||\"none\"}", "URL=http://"+loki.name+"."+lokiNS+".svc:3100", "INPUT_REFS=[\"application\"]")
g.By("Searching for Application Logs in Loki using tenantKey")
route := "http://" + getRouteAddress(oc, loki.namespace, loki.name)
lc := newLokiClient(route)
appPodName, err := oc.AdminKubeClient().CoreV1().Pods(appProj).List(context.Background(), metav1.ListOptions{LabelSelector: "run=centos-logtest"})
o.Expect(err).NotTo(o.HaveOccurred())
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
logs, err := lc.searchByKey("", tenantKey, appProj)
if err != nil {
return false, err
}
if logs.Status == "success" && logs.Data.Stats.Summary.BytesProcessedPerSecond != 0 && logs.Data.Result[0].Stream.LogType == "application" && logs.Data.Result[0].Stream.KubernetesPodName == appPodName.Items[0].Name {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "Failed searching for application logs in Loki")
e2e.Logf("Application Logs Query using namespace as tenantKey is a success")
})
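The TENANT_KEY template {.kubernetes.namespace_name||"none"} asks the forwarder to use the record's namespace as the Loki tenant label and to fall back to the literal "none" when the field is missing. Below is a small sketch of that fallback semantics in plain Go; it is an illustration only, not the collector's implementation.
package main
import "fmt"
// tenantFor mimics the `{.kubernetes.namespace_name||"none"}` template: use the
// namespace when present, otherwise fall back to the literal "none".
func tenantFor(record map[string]string) string {
if ns, ok := record["kubernetes.namespace_name"]; ok && ns != "" {
return ns
}
return "none"
}
func main() {
fmt.Println(tenantFor(map[string]string{"kubernetes.namespace_name": "my-app-project"})) // my-app-project
fmt.Println(tenantFor(map[string]string{}))                                              // none
}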
| |||||
test case
|
openshift/openshift-tests-private
|
3ef19c3e-8b89-4e2a-9472-90d2eff9dd90
|
CPaasrunOnly-Author:ikanse-Medium-48060-Medium-47801-Vector Forward logs to Loki using loki.labelKeys
|
['"context"', '"path/filepath"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
|
g.It("CPaasrunOnly-Author:ikanse-Medium-48060-Medium-47801-Vector Forward logs to Loki using loki.labelKeys", func() {
var (
loglabeltemplate = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
g.By("Create project1 for app logs")
appProj1 := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj1, "-f", loglabeltemplate, "-p", "LABELS={\"negative\": \"centos-logtest\"}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create project for app logs and deploy the log generator app")
oc.SetupProject()
appProj2 := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj2, "-f", loglabeltemplate, "-p", "LABELS={\"positive\": \"centos-logtest\"}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create Loki project and deploy Loki Server")
oc.SetupProject()
lokiNS := oc.Namespace()
loki := externalLoki{"loki-server", lokiNS}
defer loki.remove(oc)
loki.deployLoki(oc)
labelKeys := "kubernetes_labels_positive"
podLabel := "centos-logtest"
g.By("Create ClusterLogForwarder")
clf := clusterlogforwarder{
name: "clf-47801",
namespace: lokiNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "loki.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "LABEL_KEYS=[\"kubernetes.labels.positive\"]", "URL=http://"+loki.name+"."+lokiNS+".svc:3100", "INPUT_REFS=[\"application\"]")
g.By("Searching for Application Logs in Loki using LabelKey - Postive match")
route := "http://" + getRouteAddress(oc, loki.namespace, loki.name)
lc := newLokiClient(route)
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
appLogs, err := lc.searchByKey("", labelKeys, podLabel)
if err != nil {
return false, err
}
if appLogs.Status == "success" && appLogs.Data.Stats.Summary.BytesProcessedPerSecond != 0 && appLogs.Data.Stats.Ingester.TotalLinesSent != 0 {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "Failed searching for application logs in Loki")
e2e.Logf("App logs found with matching LabelKey: %s and pod Label: %s", labelKeys, podLabel)
g.By("Searching for Application Logs in Loki using LabelKey - Negative match")
labelKeys = "kubernetes_labels_negative"
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
appLogs, err := lc.searchByKey("", labelKeys, podLabel)
if err != nil {
return false, err
}
if appLogs.Status == "success" && appLogs.Data.Stats.Store.TotalChunksDownloaded == 0 && appLogs.Data.Stats.Summary.BytesProcessedPerSecond == 0 {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "Failed searching for application logs in Loki")
e2e.Logf("No App logs found with matching LabelKey: %s and pod Label: %s", labelKeys, podLabel)
})
| |||||
test case
|
openshift/openshift-tests-private
|
fa420c89-64d4-4d10-9c94-479c30dfcf57
|
Author:ikanse-CPaasrunOnly-Medium-48925-Vector Forward logs to Loki using correct loki.tenantKey.kubernetes.container_name via HTTP
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
|
g.It("Author:ikanse-CPaasrunOnly-Medium-48925-Vector Forward logs to Loki using correct loki.tenantKey.kubernetes.container_name via HTTP", func() {
var (
loglabeltemplate = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
g.By("Create project for app logs and deploy the log generator app")
appProj := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create Loki project and deploy Loki Server")
oc.SetupProject()
lokiNS := oc.Namespace()
loki := externalLoki{"loki-server", lokiNS}
defer loki.remove(oc)
loki.deployLoki(oc)
tenantKey := "kubernetes_container_name"
g.By("Create ClusterLogForwarder instance")
clf := clusterlogforwarder{
name: "clf-48925",
namespace: lokiNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "loki.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "TENANT_KEY={.kubernetes.container_name||\"none\"}", "URL=http://"+loki.name+"."+lokiNS+".svc:3100", "INPUT_REFS=[\"application\"]")
g.By("Searching for Application Logs in Loki using tenantKey")
route := "http://" + getRouteAddress(oc, loki.namespace, loki.name)
lc := newLokiClient(route)
tenantKeyID := "logging-centos-logtest"
lc.waitForLogsAppearByKey("", tenantKey, tenantKeyID)
e2e.Logf("Application Logs Query using kubernetes.container_name as tenantKey is a success")
})
| |||||
test case
|
openshift/openshift-tests-private
|
e6ddb7b7-4570-49a1-ac0c-04aef4063407
|
CPaasrunOnly-Author:qitang-High-71001-Collect or exclude logs by container[Slow]
|
['"path/filepath"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
|
g.It("CPaasrunOnly-Author:qitang-High-71001-Collect or exclude logs by container[Slow]", func() {
exutil.By("Create Loki project and deploy Loki Server")
lokiNS := oc.Namespace()
loki := externalLoki{
name: "loki-server",
namespace: lokiNS,
}
defer loki.remove(oc)
loki.deployLoki(oc)
exutil.By("Create ClusterLogForwarder")
clf := clusterlogforwarder{
name: "clf-71001",
namespace: lokiNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "loki.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "TENANT_KEY={.kubernetes.namespace_name||\"none\"}", "URL=http://"+loki.name+"."+lokiNS+".svc:3100", "INPUT_REFS=[\"application\"]")
patch := `[{"op": "add", "path": "/spec/inputs", "value": [{"name": "new-app", "type": "application", "application": {"excludes": [{"container":"exclude*"}]}}]},{"op": "replace", "path": "/spec/pipelines/0/inputRefs", "value": ["new-app"]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
exutil.By("Create projects for app logs and deploy the log generators")
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
multiContainerJSONLog := filepath.Join(loggingBaseDir, "generatelog", "multi_container_json_log_template.yaml")
oc.SetupProject()
ns := oc.Namespace()
containerNames := []string{
"logging-71001-include",
"exclude-logging-logs",
"fake-kube-proxy",
}
for _, name := range containerNames {
err := oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", ns, "-p", "CONTAINER="+name, "-p", "CONFIGMAP="+name, "-p", "REPLICATIONCONTROLLER="+name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
err := oc.WithoutNamespace().Run("new-app").Args("-f", multiContainerJSONLog, "-n", ns, "-p", "CONTAINER=multiple-containers").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check logs in Loki, logs from containers/excludes* shouldn't be collected")
route := "http://" + getRouteAddress(oc, loki.namespace, loki.name)
lc := newLokiClient(route)
lc.waitForLogsAppearByProject("", ns)
for _, container := range []string{"logging-71001-include", "fake-kube-proxy", "multiple-containers-0", "multiple-containers-1", "multiple-containers-2"} {
lc.waitForLogsAppearByKey("", "kubernetes_container_name", container)
}
for _, q := range []string{`{kubernetes_container_name=~"exclude.+"}`, `{kubernetes_namespace_name=~"openshift.+"}`} {
log, err := lc.searchLogsInLoki("", q)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue(), "found logs with query "+q+", this is not expected")
}
exutil.By("Update CLF to exclude all containers")
patch = `[{"op": "replace", "path": "/spec/inputs/0/application/excludes", "value": [{"container":"*"}]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
exutil.By("Check logs in Loki, no logs collected")
_ = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "-n", loki.namespace, "-l", "component=loki,appname=loki-server").Execute()
WaitForDeploymentPodsToBeReady(oc, loki.namespace, loki.name)
err = lc.waitForLogsAppearByQuery("", `{kubernetes_namespace_name=~".+"}`)
exutil.AssertWaitPollWithErr(err, "no container logs should be collected")
exutil.By("Update CLF to include/exclude containers")
patch = `[{"op": "replace", "path": "/spec/inputs/0/application/excludes", "value": [{"container":"exclude*"}]},{"op": "add", "path": "/spec/inputs/0/application/includes", "value": [{"container":"multiple-containers-0"},{"container":"*oxy"},{"container":"*log*"}]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
exutil.By("Check logs in Loki, only logs from containers multiple-containers-0, logging-71001-include and fake-kube-proxy should be collected")
_ = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "-n", loki.namespace, "-l", "component=loki,appname=loki-server").Execute()
WaitForDeploymentPodsToBeReady(oc, loki.namespace, loki.name)
lc.waitForLogsAppearByProject("", ns)
for _, container := range []string{"logging-71001-include", "fake-kube-proxy", "multiple-containers-0"} {
lc.waitForLogsAppearByKey("", "kubernetes_container_name", container)
}
for _, q := range []string{`{kubernetes_container_name=~"exclude.+"}`, `{kubernetes_namespace_name=~"openshift.+"}`, `{kubernetes_container_name=~"multiple-containers-1|multiple-containers-2"}`} {
log, err := lc.searchLogsInLoki("", q)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(log.Data.Result) == 0).Should(o.BeTrue(), "found logs with query "+q+", this is not expected")
}
exutil.By("Update CLF to include all application containers")
patch = `[{"op": "remove", "path": "/spec/inputs/0/application/excludes"},{"op": "replace", "path": "/spec/inputs/0/application/includes", "value": [{"container":"*"}]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
exutil.By("Check logs in Loki, only logs application projects should be collected")
_ = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "-n", loki.namespace, "-l", "component=loki,appname=loki-server").Execute()
WaitForDeploymentPodsToBeReady(oc, loki.namespace, loki.name)
lc.waitForLogsAppearByProject("", ns)
for _, container := range []string{"logging-71001-include", "fake-kube-proxy", "exclude-logging-logs", "multiple-containers-0", "multiple-containers-1", "multiple-containers-2"} {
lc.waitForLogsAppearByKey("", "kubernetes_container_name", container)
}
err = lc.waitForLogsAppearByQuery("", `{kubernetes_namespace_name=~"openshift.+"}`)
exutil.AssertWaitPollWithErr(err, "container logs from infra projects should not be collected")
})
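The includes/excludes values exercised above (exclude*, *oxy, *log*) are container-name glob patterns. The sketch below approximates which containers those patterns select using Go's path.Match; the collector's own matcher may treat globs slightly differently, so this is illustrative only.
package main
import (
"fmt"
"path"
)
// matchesAny reports whether a container name matches any of the glob patterns.
// path.Match is used here only as an approximation of the collector's glob handling.
func matchesAny(patterns []string, name string) bool {
for _, p := range patterns {
if ok, _ := path.Match(p, name); ok {
return true
}
}
return false
}
func main() {
includes := []string{"multiple-containers-0", "*oxy", "*log*"}
excludes := []string{"exclude*"}
containers := []string{"logging-71001-include", "exclude-logging-logs", "fake-kube-proxy", "multiple-containers-0", "multiple-containers-1"}
for _, c := range containers {
collected := matchesAny(includes, c) && !matchesAny(excludes, c)
fmt.Printf("%-25s collected=%v\n", c, collected)
}
}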
| |||||
test case
|
openshift/openshift-tests-private
|
960e96b2-cc75-4a71-b1b0-8af8512c26c6
|
Author:qitang-CPaasrunOnly-ConnectedOnly-Medium-48646-Medium-49486-Deploy lokistack under different namespace and Vector Forward logs to LokiStack using CLF with gateway[Serial]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
|
g.It("Author:qitang-CPaasrunOnly-ConnectedOnly-Medium-48646-Medium-49486-Deploy lokistack under different namespace and Vector Forward logs to LokiStack using CLF with gateway[Serial]", func() {
var (
jsonLogFile = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
appProj := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("deploy loki stack")
oc.SetupProject()
lokiNS := oc.Namespace()
ls := lokiStack{
name: "loki-49486",
namespace: lokiNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-49486",
storageClass: sc,
bucketName: "logging-loki-49486-" + getInfrastructureName(oc),
template: filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml"),
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
g.By("create clusterlogforwarder/instance")
lokiGatewaySVC := ls.name + "-gateway-http." + ls.namespace + ".svc:8080"
clf := clusterlogforwarder{
name: "clf-48646",
namespace: loggingNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack_gateway_https_secret.yaml"),
serviceAccountName: "logcollector-48646",
secretName: "lokistack-gateway-48646",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "GATEWAY_SVC="+lokiGatewaySVC)
//check logs in loki stack
g.By("check logs in loki")
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
lc := newLokiClient(route).withToken(bearerToken).retry(5)
for _, logType := range []string{"application", "infrastructure", "audit"} {
lc.waitForLogsAppearByKey(logType, "log_type", logType)
}
lc.waitForLogsAppearByProject("application", appProj)
})
| |||||
test case
|
openshift/openshift-tests-private
|
809c4bf3-5890-4533-b407-e061c8abd6bd
|
Author:kbharti-CPaasrunOnly-ConnectedOnly-Medium-54663-Medium-48628-unique cluster identifier in all type of the log record and Expose Loki metrics to Prometheus[Serial]
|
['"context"', '"path/filepath"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
|
g.It("Author:kbharti-CPaasrunOnly-ConnectedOnly-Medium-54663-Medium-48628-unique cluster identifier in all type of the log record and Expose Loki metrics to Prometheus[Serial]", func() {
var (
jsonLogFile = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
appProj := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploying LokiStack CR for 1x.demo tshirt size")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "loki-54663",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-54663",
storageClass: sc,
bucketName: "logging-loki-54663-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-54663",
namespace: loggingNS,
serviceAccountName: "logcollector-54663",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-54663",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
g.By("checking app, infra and audit logs in loki")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
for _, logType := range []string{"application", "infrastructure", "audit"} {
lc.waitForLogsAppearByKey(logType, "log_type", logType)
}
lc.waitForLogsAppearByProject("application", appProj)
g.By("checking if the unique cluster identifier is added to the log records")
clusterID, err := getClusterID(oc)
o.Expect(err).NotTo(o.HaveOccurred())
for _, logType := range []string{"application", "infrastructure", "audit"} {
logs, err := lc.searchByKey(logType, "log_type", logType)
o.Expect(err).NotTo(o.HaveOccurred())
extractedLogs := extractLogEntities(logs)
for _, log := range extractedLogs {
o.Expect(log.OpenShift.ClusterID == clusterID).Should(o.BeTrue())
}
e2e.Logf("Find cluster_id in %s logs", logType)
}
svcs, err := oc.AdminKubeClient().CoreV1().Services(ls.namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "app.kubernetes.io/created-by=lokistack-controller"})
o.Expect(err).NotTo(o.HaveOccurred())
g.By("query metrics in prometheus")
prometheusToken := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
for _, svc := range svcs.Items {
if !strings.Contains(svc.Name, "grpc") && !strings.Contains(svc.Name, "ring") {
checkMetric(oc, prometheusToken, "{job=\""+svc.Name+"\"}", 3)
}
}
for _, metric := range []string{"loki_boltdb_shipper_compactor_running", "loki_distributor_bytes_received_total", "loki_inflight_requests", "workqueue_work_duration_seconds_bucket{namespace=\"" + loNS + "\", job=\"loki-operator-controller-manager-metrics-service\"}", "loki_build_info", "loki_ingester_streams_created_total"} {
checkMetric(oc, prometheusToken, metric, 3)
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
bd529998-632c-433f-8177-22849a064b3b
|
CPaasrunOnly-ConnectedOnly-Author:kbharti-High-57063-Forward app logs to Loki with namespace selectors (vector)[Serial]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
|
g.It("CPaasrunOnly-ConnectedOnly-Author:kbharti-High-57063-Forward app logs to Loki with namespace selectors (vector)[Serial]", func() {
g.By("Creating 2 applications..")
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
appProj1 := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj1, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
appProj2 := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj2, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploying LokiStack CR for 1x.demo tshirt size")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "loki-57063",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-57063",
storageClass: sc,
bucketName: "logging-loki-57063-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-57063",
namespace: loggingNS,
serviceAccountName: "logcollector-57063",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-57063",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace, `INPUT_REFS=["infrastructure", "audit"]`)
patch := `[{"op": "add", "path": "/spec/inputs", "value": [{"name": "new-app", "type": "application", "application": {"includes": [{"namespace":"` + appProj2 + `"}]}}]}, {"op": "add", "path": "/spec/pipelines/0/inputRefs/-", "value": "new-app"}]`
clf.update(oc, "", patch, "--type=json")
clf.waitForCollectorPodsReady(oc)
g.By("checking infra and audit logs in loki")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
for _, logType := range []string{"infrastructure", "audit"} {
lc.waitForLogsAppearByKey(logType, "log_type", logType)
}
g.By("check logs in loki for custom app input..")
lc.waitForLogsAppearByProject("application", appProj2)
// no logs should be found for the app project that is not selected by the custom input in the CLF
appLog, err := lc.searchByNamespace("application", appProj1)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(appLog.Data.Result) == 0).Should(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
a2dfc4eb-1fd6-4e6b-afc5-2e29d3e49ede
|
Author:qitang-CPaasrunOnly-High-74945-New filter detectMultilineException test[Serial][Slow]
|
['"context"', '"path/filepath"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
|
g.It("Author:qitang-CPaasrunOnly-High-74945-New filter detectMultilineException test[Serial][Slow]", func() {
multilineLogTypes := map[string][]string{
"java": {javaExc, complexJavaExc, nestedJavaExc},
"go": {goExc, goOnGaeExc, goSignalExc, goHTTP},
"ruby": {rubyExc, railsExc},
"js": {clientJsExc, nodeJsExc, v8JsExc},
"csharp": {csharpAsyncExc, csharpNestedExc, csharpExc},
"python": {pythonExc},
"php": {phpOnGaeExc, phpExc},
"dart": {
dartAbstractClassErr,
dartArgumentErr,
dartAssertionErr,
dartAsyncErr,
dartConcurrentModificationErr,
dartDivideByZeroErr,
dartErr,
dartTypeErr,
dartExc,
dartUnsupportedErr,
dartUnimplementedErr,
dartOOMErr,
dartRangeErr,
dartReadStaticErr,
dartStackOverflowErr,
dartFallthroughErr,
dartFormatErr,
dartFormatWithCodeErr,
dartNoMethodErr,
dartNoMethodGlobalErr,
},
}
g.By("Deploying LokiStack CR for 1x.demo tshirt size")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "loki-74945",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-74945",
storageClass: sc,
bucketName: "logging-loki-74945-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err := ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-74945",
namespace: loggingNS,
serviceAccountName: "logcollector-74945",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-74945",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
patch := `[{"op": "add", "path": "/spec/filters", "value": [{"name": "detectmultiline", "type": "detectMultilineException"}]}, {"op": "add", "path": "/spec/pipelines/0/filterRefs", "value":["detectmultiline"]}]`
clf.update(oc, "", patch, "--type=json")
clf.waitForCollectorPodsReady(oc)
g.By("create some pods to generate multiline error")
multilineLogFile := filepath.Join(loggingBaseDir, "generatelog", "multiline-error-log.yaml")
for k := range multilineLogTypes {
ns := "multiline-log-" + k + "-74945"
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("project", ns, "--wait=false").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", ns, "deploy/multiline-log", "cm/multiline-log").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-n", ns, "-f", multilineLogFile, "-p", "LOG_TYPE="+k, "-p", "RATE=60.00").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
g.By("check data in Loki")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
for k, v := range multilineLogTypes {
g.By("check " + k + " logs\n")
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
appLogs, err := lc.searchByNamespace("application", "multiline-log-"+k+"-74945")
if err != nil {
return false, err
}
if appLogs.Status == "success" && len(appLogs.Data.Result) > 0 {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "can't find "+k+" logs")
for _, log := range v {
err = wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 120*time.Second, true, func(context.Context) (done bool, err error) {
dataInLoki, _ := lc.queryRange("application", "{kubernetes_namespace_name=\"multiline-log-"+k+"-74945\"}", len(v)*2, time.Now().Add(time.Duration(-2)*time.Hour), time.Now(), false)
lokiLogs := extractLogEntities(dataInLoki)
var messages []string
for _, lokiLog := range lokiLogs {
messages = append(messages, lokiLog.Message)
}
if len(messages) == 0 {
return false, nil
}
if !containSubstring(messages, log) {
e2e.Logf("can't find log\n%s, try next round", log)
return false, nil
}
return true, nil
})
if err != nil {
e2e.Failf("%s logs are not parsed", k)
}
}
e2e.Logf("\nfound %s logs in Loki\n", k)
}
})
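The detectMultilineException filter reassembles stack traces that a runtime emits as separate lines into a single log event, which is what the message checks above verify. Below is a deliberately naive sketch of that grouping idea for Java-style traces; the real filter supports many languages and uses its own heuristics.
package main
import (
"fmt"
"strings"
)
// isContinuation reports whether a line looks like the continuation of a Java-style
// stack trace rather than the start of a new log event (naive heuristic).
func isContinuation(line string) bool {
trimmed := strings.TrimLeft(line, " \t")
return strings.HasPrefix(trimmed, "at ") ||
strings.HasPrefix(trimmed, "Caused by:") ||
strings.HasPrefix(trimmed, "...")
}
// groupEvents merges continuation lines into the preceding event.
func groupEvents(lines []string) []string {
var events []string
for _, line := range lines {
if len(events) > 0 && isContinuation(line) {
events[len(events)-1] += "\n" + line
continue
}
events = append(events, line)
}
return events
}
func main() {
raw := []string{
"java.lang.NullPointerException: something was nil",
"    at com.example.Foo.bar(Foo.java:42)",
"    at com.example.Main.main(Main.java:10)",
"next unrelated log line",
}
for i, ev := range groupEvents(raw) {
fmt.Printf("event %d:\n%s\n\n", i, ev)
}
}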
| |||||
test case
|
openshift/openshift-tests-private
|
16f28d7c-6617-40d7-91f9-54f0cbc6ffaa
|
CPaasrunOnly-ConnectedOnly-Author:qitang-Medium-71144-Collect or exclude infrastructure logs.[Serial][Slow]
|
['"path/filepath"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
|
g.It("CPaasrunOnly-ConnectedOnly-Author:qitang-Medium-71144-Collect or exclude infrastructure logs.[Serial][Slow]", func() {
exutil.By("Deploying LokiStack CR for 1x.demo tshirt size")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "loki-71144",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-71144",
storageClass: sc,
bucketName: "logging-loki-71144-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err := ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-71144",
namespace: loggingNS,
serviceAccountName: "logcollector-71144",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-71144",
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace, "INPUT_REFS=[\"infrastructure\"]")
exutil.By("checking infra logs in loki")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
err = lc.waitForLogsAppearByQuery("infrastructure", `{log_type="infrastructure",kubernetes_namespace_name=~".+"}`)
exutil.AssertWaitPollNoErr(err, "can't find infra container logs")
err = lc.waitForLogsAppearByQuery("infrastructure", `{log_type="infrastructure",kubernetes_namespace_name!~".+"}`)
exutil.AssertWaitPollNoErr(err, "can't find journal logs")
exutil.By("update CLF to only collect journal logs")
patch := `[{"op": "add", "path": "/spec/inputs", "value": [{"name": "selected-infra", "type": "infrastructure", "infrastructure": {"sources":["node"]}}]},{"op": "replace", "path": "/spec/pipelines/0/inputRefs", "value": ["selected-infra"]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 3 minutes for collector pods to send the cached records
time.Sleep(3 * time.Minute)
exutil.By("check data in lokistack, only journal logs are collected")
re, _ := lc.queryRange("infrastructure", `{ log_type="infrastructure", kubernetes_namespace_name!~".+" }`, 30, time.Now().Add(time.Duration(-2)*time.Minute), time.Now(), true)
o.Expect(len(re.Data.Result) > 0).Should(o.BeTrue())
re, _ = lc.queryRange("infrastructure", `{ log_type="infrastructure", kubernetes_namespace_name=~".+" }`, 30, time.Now().Add(time.Duration(-2)*time.Minute), time.Now(), true)
o.Expect(len(re.Data.Result) == 0).Should(o.BeTrue())
exutil.By("Update CLF to collect infra container logs")
patch = `[{"op": "replace", "path": "/spec/inputs/0/infrastructure/sources", "value": ["container"]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 3 minutes for collector pods to send the cached records
time.Sleep(3 * time.Minute)
exutil.By("check data in lokistack, only infra container logs are collected")
// check vector.toml: logs from logging infra pods should be excluded
searchString := `include_paths_glob_patterns = ["/var/log/pods/default_*/*/*.log", "/var/log/pods/kube-*_*/*/*.log", "/var/log/pods/kube_*/*/*.log", "/var/log/pods/openshift-*_*/*/*.log", "/var/log/pods/openshift_*/*/*.log"]
exclude_paths_glob_patterns = ["/var/log/pods/*/*/*.gz", "/var/log/pods/*/*/*.log.*", "/var/log/pods/*/*/*.tmp", "/var/log/pods/openshift-logging_*/gateway/*.log", "/var/log/pods/openshift-logging_*/loki*/*.log", "/var/log/pods/openshift-logging_*/opa/*.log", "/var/log/pods/openshift-logging_elasticsearch-*/*/*.log", "/var/log/pods/openshift-logging_kibana-*/*/*.log", "/var/log/pods/openshift-logging_logfilesmetricexporter-*/*/*.log"]`
result, err := checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", searchString)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue(), "the configuration %s is not in vector.toml", searchString)
re, _ = lc.queryRange("infrastructure", `{ log_type="infrastructure", kubernetes_namespace_name=~".+" }`, 30, time.Now().Add(time.Duration(-2)*time.Minute), time.Now(), true)
o.Expect(len(re.Data.Result) > 0).Should(o.BeTrue())
re, _ = lc.queryRange("infrastructure", `{ log_type="infrastructure", kubernetes_namespace_name!~".+" }`, 30, time.Now().Add(time.Duration(-2)*time.Minute), time.Now(), true)
o.Expect(len(re.Data.Result) == 0).Should(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
edd6aa84-6c00-47ab-a8c0-33cc424313a0
|
CPaasrunOnly-ConnectedOnly-Author:qitang-High-71749-Drop logs based on test of fields and their values[Serial][Slow]
|
['"path/filepath"', '"strconv"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
|
g.It("CPaasrunOnly-ConnectedOnly-Author:qitang-High-71749-Drop logs based on test of fields and their values[Serial][Slow]", func() {
exutil.By("Deploying LokiStack CR for 1x.demo tshirt size")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "loki-71749",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-71749",
storageClass: sc,
bucketName: "logging-loki-71749-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err := ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "collector-71749",
namespace: loggingNS,
serviceAccountName: "logcollector-71749",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "71749.yaml"),
secretName: "lokistack-secret-71749",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
enableMonitoring: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
exutil.By("Create projects for app logs and deploy the log generators")
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
var namespaces []string
for i := 0; i < 3; i++ {
ns := "logging-project-71749-" + strconv.Itoa(i)
defer oc.DeleteSpecifiedNamespaceAsAdmin(ns)
oc.CreateSpecifiedNamespaceAsAdmin(ns)
namespaces = append(namespaces, ns)
}
err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", namespaces[0], "-p", "LABELS={\"test\": \"logging-71749-test\"}", "-p", "REPLICATIONCONTROLLER=logging-71749-test").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", namespaces[1], "-p", "LABELS={\"test\": \"logging-71749-test\", \"test.logging.io/logging.qe-test-label\": \"logging-71749-test\"}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", namespaces[2], "-p", "LABELS={\"test\": \"logging-71749-test\"}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check logs in loki")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
lc.waitForLogsAppearByKey("application", "log_type", "application")
lc.waitForLogsAppearByKey("infrastructure", "log_type", "infrastructure")
lc.waitForLogsAppearByKey("audit", "log_type", "audit")
// logs from openshift* projects are dropped
re, err := lc.searchLogsInLoki("infrastructure", `{ log_type="infrastructure", kubernetes_namespace_name=~"openshift.+" }`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(re.Data.Result) == 0).Should(o.BeTrue())
// only logs from namespaces[2] should be collected
app, err := lc.searchLogsInLoki("application", `{ log_type="application", kubernetes_namespace_name!~"`+namespaces[2]+`" }`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(app.Data.Result) == 0).Should(o.BeTrue())
// logs with a level of `error` and a message containing the word `error` are dropped
infra, err := lc.searchLogsInLoki("infrastructure", `{ log_type="infrastructure" } | level=~"error|err|eror", message=~".+error.+"`)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(infra.Data.Result) == 0).Should(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
01b6d373-eb2b-4527-a95b-552c0ac1af4f
|
Author:anli-CPaasrunOnly-Critical-71049-Inputs.receiver.syslog to lokistack[Serial][Slow]
|
['"os"', '"os/exec"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
|
g.It("Author:anli-CPaasrunOnly-Critical-71049-Inputs.receiver.syslog to lokistack[Serial][Slow]", func() {
g.By("deploy loki stack")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "lokistack-71049",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-71049",
storageClass: sc,
bucketName: "logging-loki-71049-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err := ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("create a CLF to forward logs to lokistack")
clf := clusterlogforwarder{
name: "instance-71049",
namespace: loggingNS,
serviceAccountName: "logcollector-71049",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "71049.yaml"),
secretName: "lokistack-secret-71049",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
g.By("Create clusterlogforwarder as syslog clinet and forward logs to syslogserver")
sysCLF := clusterlogforwarder{
name: "instance",
namespace: oc.Namespace(),
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "rsyslog-serverAuth.yaml"),
secretName: "clf-syslog-secret",
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
g.By("Create secret for collector pods to connect to syslog server")
tmpDir := "/tmp/" + getRandomString()
defer exec.Command("rm", "-r", tmpDir).Output()
err = os.Mkdir(tmpDir, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
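// reuse the cert from the syslog receiver's generated secret as the CA bundle for the forwarding CLF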
err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/"+clf.name+"-syslog", "-n", clf.namespace, "--confirm", "--to="+tmpDir).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", sysCLF.secretName, "-n", sysCLF.namespace, "--from-file=ca-bundle.crt="+tmpDir+"/tls.crt").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer sysCLF.delete(oc)
sysCLF.create(oc, "URL=tls://"+clf.name+"-syslog."+clf.namespace+".svc:6514")
//check logs in loki stack
g.By("check logs in loki")
defer removeClusterRoleFromServiceAccount(oc, sysCLF.namespace, "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, sysCLF.namespace, "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", sysCLF.namespace)
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
lc.waitForLogsAppearByKey("infrastructure", "log_type", "infrastructure")
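// records forwarded over syslog are expected to carry the default facility "local0"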
sysLog, err := lc.searchLogsInLoki("infrastructure", `{log_type = "infrastructure"}|json|facility = "local0"`)
o.Expect(err).NotTo(o.HaveOccurred())
sysLogs := extractLogEntities(sysLog)
o.Expect(len(sysLogs) > 0).Should(o.BeTrue(), "can't find logs from syslog in lokistack")
})
| |||||
test case
|
openshift/openshift-tests-private
|
3096f6f3-e43e-4d9b-a65f-c56757e6f26f
|
Author:qitang-CPaasrunOnly-High-76727-Add stream info to data model viaq[Serial][Slow]
|
['"fmt"', '"path/filepath"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
|
g.It("Author:qitang-CPaasrunOnly-High-76727-Add stream info to data model viaq[Serial][Slow]", func() {
multilineLogs := []string{
javaExc, complexJavaExc, nestedJavaExc,
goExc, goOnGaeExc, goSignalExc, goHTTP,
rubyExc, railsExc,
clientJsExc, nodeJsExc, v8JsExc,
csharpAsyncExc, csharpNestedExc, csharpExc,
pythonExc,
phpOnGaeExc, phpExc,
dartAbstractClassErr,
dartArgumentErr,
dartAssertionErr,
dartAsyncErr,
dartConcurrentModificationErr,
dartDivideByZeroErr,
dartErr,
dartTypeErr,
dartExc,
dartUnsupportedErr,
dartUnimplementedErr,
dartOOMErr,
dartRangeErr,
dartReadStaticErr,
dartStackOverflowErr,
dartFallthroughErr,
dartFormatErr,
dartFormatWithCodeErr,
dartNoMethodErr,
dartNoMethodGlobalErr,
}
exutil.By("Deploying LokiStack CR for 1x.demo tshirt size")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "loki-76727",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-76727",
storageClass: sc,
bucketName: "logging-loki-76727-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err := ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("create a CLF to forward logs to lokistack")
clf := clusterlogforwarder{
name: "instance-76727",
namespace: loggingNS,
serviceAccountName: "logcollector-76727",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-76727",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
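// enable the detectMultilineException filter on the pipeline so multiline stack traces are reassembled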
patch := `[{"op": "add", "path": "/spec/filters", "value": [{"name": "detectmultiline", "type": "detectMultilineException"}]}, {"op": "add", "path": "/spec/pipelines/0/filterRefs", "value":["detectmultiline"]}]`
clf.update(oc, "", patch, "--type=json")
clf.waitForCollectorPodsReady(oc)
exutil.By("create some pods to generate multiline errors")
multilineLogFile := filepath.Join(loggingBaseDir, "generatelog", "multiline-error-log.yaml")
ioStreams := []string{"stdout", "stderr"}
for _, ioStream := range ioStreams {
ns := "multiline-log-" + ioStream + "-76727"
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("project", ns, "--wait=false").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", ns, "deploy/multiline-log", "cm/multiline-log").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-n", ns, "-f", multilineLogFile, "-p", "OUT_STREAM="+ioStream, "-p", "RATE=60.00").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("check data in Loki")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
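// for each stream, every record should carry the matching container iostream value and a fully reassembled multiline message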
for _, ioStream := range ioStreams {
lc.waitForLogsAppearByProject("application", "multiline-log-"+ioStream+"-76727")
dataInLoki, _ := lc.searchByNamespace("application", "multiline-log-"+ioStream+"-76727")
lokiLog := extractLogEntities(dataInLoki)
for _, log := range lokiLog {
o.Expect(log.Kubernetes.ContainerIOStream == ioStream).Should(o.BeTrue(), `iostream is wrong, expected: `+ioStream+`, got: `+log.Kubernetes.ContainerIOStream)
o.Expect(containSubstring(multilineLogs, log.Message)).Should(o.BeTrue(), fmt.Sprintf("Parse multiline error failed, iostream: %s, message: \n%s", ioStream, log.Message))
}
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
c8549613-eeff-42c9-9eb1-94029dd204d5
|
Author:qitang-CPaasrunOnly-High-78380-Collector should collect logs from all log sources.[Serial]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
|
g.It("Author:qitang-CPaasrunOnly-High-78380-Collector should collect logs from all log sources.[Serial]", func() {
exutil.By("Deploying LokiStack")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "loki-78380",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-78380",
storageClass: sc,
bucketName: "logging-loki-78380-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err := ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("create a CLF to forward to lokistack")
clf := clusterlogforwarder{
name: "clf-78380-" + getRandomString(),
namespace: loggingNS,
serviceAccountName: "clf-78380",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-78380",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
exutil.By("checking app, infra and audit logs in loki")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
lc.waitForLogsAppearByKey("audit", "log_type", "audit")
exutil.By("Check audit logs, should find logs from each directory")
// OVN audit logs are covered in OCP-71143 and OCP-53995
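// the queries below cover auditd, kube-apiserver, and the OpenShift API/OAuth servers via log_source and requestURI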
for _, q := range []string{
`{log_type="audit"} | json | log_source="auditd"`,
`{log_type="audit"} | json | log_source="kubeAPI"`,
`{log_type="audit"} | json | log_source="openshiftAPI" | requestURI=~"/apis/route.openshift.io.+"`, //openshift-apiserver
`{log_type="audit"} | json | log_source="openshiftAPI" | requestURI=~"/apis/oauth.openshift.io/.+"`, //oauth-apiserver
`{log_type="audit"} | json | log_source="openshiftAPI" | requestURI=~"/oauth/authorize.+"`, //oauth-server
`{log_type="audit"} | json | log_source="openshiftAPI" | requestURI=~"/login/.+"`, //oauth-server
} {
err = lc.waitForLogsAppearByQuery("audit", q)
exutil.AssertWaitPollNoErr(err, "can't find log with query: "+q)
}
})
|