Dataset columns:
- element_type: string, 4 distinct values
- project_name: string, 1 distinct value
- uuid: string, length 36
- name: string, length 0 to 346
- imports: string, length 0 to 2.67k
- structs: string, 761 distinct values
- interfaces: string, 22 distinct values
- file_location: string, 545 distinct values
- code: string, length 26 to 8.07M
- global_vars: string, 7 distinct values
- package: string, 124 distinct values
- tags: string, 1 distinct value

element_type: test case
project_name: openshift/openshift-tests-private
uuid: 4d8ab249-1403-4e9f-a013-bb5c6413ed68
name: Author:qitang-CPaasrunOnly-Critical-75298-Forward to Loki with default labelKeys
imports: ["path/filepath"]
file_location: github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
code:
g.It("Author:qitang-CPaasrunOnly-Critical-75298-Forward to Loki with default labelKeys", func() {
var (
loglabeltemplate = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
g.By("Fetch and set the Grafana Loki credentials")
lokiUsername, lokiPassword, err := getExtLokiSecret()
o.Expect(err).NotTo(o.HaveOccurred())
lokiURL := "https://logs-prod3.grafana.net"
g.By("Create project for app logs and deploy the log generator app")
appProj := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
clfNS := oc.Namespace()
g.By("Create secret with external Grafana Loki instance credentials")
sct := resource{"secret", "loki-client", clfNS}
defer sct.clear(oc)
_, err = oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args(sct.kind, "generic", sct.name, "-n", sct.namespace, "--from-literal=username="+lokiUsername+"", "--from-literal=password="+lokiPassword+"").Output()
o.Expect(err).NotTo(o.HaveOccurred())
sct.WaitForResourceToAppear(oc)
g.By("Create ClusterLogForwarder to forward logs to the external Loki instance")
clf := clusterlogforwarder{
name: "clf-75298",
namespace: clfNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "clf-external-loki-with-secret.yaml"),
secretName: sct.name,
waitForPodReady: true,
collectApplicationLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "LOKI_URL="+lokiURL, "INPUTREFS=[\"application\"]", `TUNING={"compression": "snappy", "deliveryMode": "AtLeastOnce", "maxWrite": "10M"}`)
exutil.By("check logs in grafana loki")
lc := newLokiClient(lokiURL).withBasicAuth(lokiUsername, lokiPassword).retry(5)
lc.waitForLogsAppearByProject("", appProj)
exutil.By("Check configurations in collector pods")
expectedConfigs := []string{
`compression = "snappy"`,
`[sinks.output_loki_server.batch]
max_bytes = 10000000`,
`[sinks.output_loki_server.buffer]
type = "disk"
when_full = "block"
max_size = 268435488`,
}
result, err := checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", expectedConfigs...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).Should(o.BeTrue())
})

element_type: test case
project_name: openshift/openshift-tests-private
uuid: a41c4ac6-623f-4828-8620-b82e77d754a1
name: CPaasrunOnly-Author:ikanse-Medium-48490-Vector Forward logs to Grafana Loki using HTTPS and existing loki.tenantKey kubernetes.labels.test
imports: ["context", "fmt", "path/filepath", "time", "k8s.io/apimachinery/pkg/util/wait"]
file_location: github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
code:
g.It("CPaasrunOnly-Author:ikanse-Medium-48490-Vector Forward logs to Grafana Loki using HTTPS and existing loki.tenantKey kubernetes.labels.test", func() {
var (
loglabeltemplate = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
g.By("Fetch and set the Grafana Loki credentials")
lokiUsername, lokiPassword, err := getExtLokiSecret()
o.Expect(err).NotTo(o.HaveOccurred())
lokiURL := "https://logs-prod3.grafana.net"
g.By("Create project for app logs and deploy the log generator app")
appProj := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
clfNS := oc.Namespace()
g.By("Create secret with external Grafana Loki instance credentials")
sct := resource{"secret", "loki-client", clfNS}
defer sct.clear(oc)
_, err = oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args(sct.kind, "generic", sct.name, "-n", sct.namespace, "--from-literal=username="+lokiUsername+"", "--from-literal=password="+lokiPassword+"").Output()
o.Expect(err).NotTo(o.HaveOccurred())
sct.WaitForResourceToAppear(oc)
g.By("Create ClusterLogForwarder to forward logs to the external Loki instance with tenantKey kubernetes_labels.test")
clf := clusterlogforwarder{
name: "clf-48490",
namespace: clfNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "clf-external-loki-with-secret-tenantKey.yaml"),
secretName: sct.name,
waitForPodReady: true,
collectApplicationLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "LOKI_URL="+lokiURL, "TENANTKEY={.kubernetes.labels.test||\"none\"}")
g.By(fmt.Sprintf("Search for the %s project logs in Loki", appProj))
lc := newLokiClient(lokiURL).withBasicAuth(lokiUsername, lokiPassword).retry(5)
g.By("Searching for Application Logs in Loki")
appPodName, err := oc.AdminKubeClient().CoreV1().Pods(appProj).List(context.Background(), metav1.ListOptions{LabelSelector: "run=centos-logtest"})
o.Expect(err).NotTo(o.HaveOccurred())
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
appLogs, err := lc.searchByNamespace("", appProj)
if err != nil {
return false, err
}
if appLogs.Status == "success" && appLogs.Data.Stats.Summary.BytesProcessedPerSecond != 0 && appLogs.Data.Result[0].Stream.LogType == "application" && appLogs.Data.Result[0].Stream.KubernetesPodName == appPodName.Items[0].Name {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "failed searching for application logs in Loki")
e2e.Logf("Application Logs Query is a success")
})

element_type: test case
project_name: openshift/openshift-tests-private
uuid: 0b79fba6-b12a-4254-8b1b-7442ebe396d2
name: CPaasrunOnly-Author:ikanse-Medium-48923-Vector Forward logs to Grafana Loki using HTTPS and existing loki.tenantKey kubernetes.namespace_name
imports: ["context", "fmt", "path/filepath", "time", "k8s.io/apimachinery/pkg/util/wait"]
file_location: github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
code:
g.It("CPaasrunOnly-Author:ikanse-Medium-48923-Vector Forward logs to Grafana Loki using HTTPS and existing loki.tenantKey kubernetes.namespace_name", func() {
var (
loglabeltemplate = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
g.By("Fetch and set the Grafana Loki credentials")
lokiUsername, lokiPassword, err := getExtLokiSecret()
o.Expect(err).NotTo(o.HaveOccurred())
lokiURL := "https://logs-prod3.grafana.net"
g.By("Create project for app logs and deploy the log generator app")
appProj := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
clfNS := oc.Namespace()
g.By("Create secret with external Grafana Loki instance credentials")
sct := resource{"secret", "loki-client", clfNS}
defer sct.clear(oc)
_, err = oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args(sct.kind, "generic", sct.name, "-n", sct.namespace, "--from-literal=username="+lokiUsername+"", "--from-literal=password="+lokiPassword+"").Output()
o.Expect(err).NotTo(o.HaveOccurred())
sct.WaitForResourceToAppear(oc)
g.By("Create ClusterLogForwarder to forward logs to the external Loki instance with tenantKey kubernetes_labels.test")
clf := clusterlogforwarder{
name: "clf-48923",
namespace: clfNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "clf-external-loki-with-secret-tenantKey.yaml"),
secretName: sct.name,
waitForPodReady: true,
collectApplicationLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "LOKI_URL="+lokiURL, "TENANTKEY={.kubernetes.namespace_name||\"none\"}")
g.By(fmt.Sprintf("Search for the %s project logs in Loki", appProj))
lc := newLokiClient(lokiURL).withBasicAuth(lokiUsername, lokiPassword).retry(5)
g.By("Searching for Application Logs in Loki")
appPodName, err := oc.AdminKubeClient().CoreV1().Pods(appProj).List(context.Background(), metav1.ListOptions{LabelSelector: "run=centos-logtest"})
o.Expect(err).NotTo(o.HaveOccurred())
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
appLogs, err := lc.searchByNamespace("", appProj)
if err != nil {
return false, err
}
if appLogs.Status == "success" && appLogs.Data.Stats.Summary.BytesProcessedPerSecond != 0 && appLogs.Data.Result[0].Stream.LogType == "application" && appLogs.Data.Result[0].Stream.KubernetesPodName == appPodName.Items[0].Name {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "failed searching for application logs in Loki")
e2e.Logf("Application Logs Query is a success")
})

element_type: test case
project_name: openshift/openshift-tests-private
uuid: 212227ff-d5ec-4320-8bf6-8f779b133c6d
name: CPaasrunOnly-Author:ikanse-High-62975-Collector connects to the remote output using the cipher defined in the tlsSecurityPrfoile [Slow][Disruptive]
imports: ["context", "path/filepath", "strings", "time", "k8s.io/apimachinery/pkg/util/wait"]
file_location: github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
code:
g.It("CPaasrunOnly-Author:ikanse-High-62975-Collector connects to the remote output using the cipher defined in the tlsSecurityPrfoile [Slow][Disruptive]", func() {
g.By("Make sure that all the Cluster Operators are in healthy state before progressing.")
waitForOperatorsRunning(oc)
var (
loglabeltemplate = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
g.By("Fetch and set the Grafana Loki credentials")
lokiUsername, lokiPassword, err := getExtLokiSecret()
o.Expect(err).NotTo(o.HaveOccurred())
lokiURL := "https://logs-prod3.grafana.net"
g.By("Create project for app logs and deploy the log generator app")
appProj := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
clfNS := oc.Namespace()
g.By("Create secret with external Grafana Loki instance credentials")
sct := resource{"secret", "loki-client", clfNS}
defer sct.clear(oc)
_, err = oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args(sct.kind, "generic", sct.name, "-n", sct.namespace, "--from-literal=username="+lokiUsername+"", "--from-literal=password="+lokiPassword+"").Output()
o.Expect(err).NotTo(o.HaveOccurred())
sct.WaitForResourceToAppear(oc)
g.By("Create ClusterLogForwarder to forward logs to the external Loki instance")
clf := clusterlogforwarder{
name: "clf-62975",
namespace: clfNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "clf-external-loki-with-secret.yaml"),
secretName: sct.name,
collectApplicationLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "LOKI_URL="+lokiURL, "INPUTREFS=[\"application\"]")
patch := `[{"op": "add", "path": "/spec/outputs/0/tls", "value": {"securityProfile": {"type": "Custom", "custom": {"ciphers": ["TLS_AES_128_CCM_SHA256"], "minTLSVersion": "VersionTLS13"}}}}]`
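// TLS_AES_128_CCM_SHA256 is a TLS 1.3 ciphersuite the external Loki endpoint is not expected
// to accept, so after this patch the collector's TLS handshake should fail: the checks below
// wait for "error trying to connect" in the collector logs and expect the Loki query to
// return no results.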
clf.update(oc, "", patch, "--type=json")
clf.waitForCollectorPodsReady(oc)
g.By("The Loki sink in Vector config must use the Custom tlsSecurityProfile with ciphersuite TLS_AES_128_CCM_SHA256")
searchString := `[sinks.output_loki_server.tls]
min_tls_version = "VersionTLS13"
ciphersuites = "TLS_AES_128_CCM_SHA256"`
result, err := checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", searchString)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue(), "the configuration %s is not in vector.toml", searchString)
g.By("Check for errors in collector pod logs.")
err = wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 3*time.Minute, true, func(context.Context) (done bool, err error) {
collectorLogs, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", clf.namespace, "--selector=app.kubernetes.io/component=collector").Output()
if err != nil {
return false, nil
}
return strings.Contains(collectorLogs, "error trying to connect"), nil
})
exutil.AssertWaitPollNoErr(err, "Collector shouldn't connect to the external Loki server.")
g.By("Searching for Application Logs in Loki")
lc := newLokiClient(lokiURL).withBasicAuth(lokiUsername, lokiPassword).retry(5)
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
appLogs, err := lc.searchByNamespace("", appProj)
if err != nil {
return false, err
}
return appLogs.Status == "success" && len(appLogs.Data.Result) == 0, nil
})
exutil.AssertWaitPollNoErr(err, "Failed searching for application logs in Loki")
g.By("Set the Custom tlsSecurityProfile for Loki output")
patch = `[{"op": "replace", "path": "/spec/outputs/0/tls/securityProfile/custom/ciphers", "value": ["TLS_CHACHA20_POLY1305_SHA256"]}]`
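// Replacing the cipher with TLS_CHACHA20_POLY1305_SHA256, a ciphersuite the server is expected
// to accept, should restore connectivity: the checks below expect no "error trying to connect"
// messages and for application logs to reach Loki again.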
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
g.By("The Loki sink in Vector config must use the Custom tlsSecurityProfile with ciphersuite TLS_CHACHA20_POLY1305_SHA256")
searchString = `[sinks.output_loki_server.tls]
min_tls_version = "VersionTLS13"
ciphersuites = "TLS_CHACHA20_POLY1305_SHA256"`
result, err = checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", searchString)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue(), "the configuration %s is not in vector.toml", searchString)
g.By("Check for errors in collector pod logs.")
err = wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 3*time.Minute, true, func(context.Context) (done bool, err error) {
collectorLogs, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", clf.namespace, "--selector=app.kubernetes.io/component=collector").Output()
if err != nil {
return false, nil
}
return !strings.Contains(collectorLogs, "error trying to connect"), nil
})
exutil.AssertWaitPollNoErr(err, "Unable to connect to the external Loki server.")
g.By("Searching for Application Logs in Loki")
lc.waitForLogsAppearByProject("", appProj)
})

element_type: test case
project_name: openshift/openshift-tests-private
uuid: 58b93117-032f-4e70-8096-3b26c8e63015
name: CPaasrunOnly-Author:ikanse-Low-61476-Collector-External Loki output complies with the tlsSecurityProfile configuration.[Slow][Disruptive]
imports: ["context", "fmt", "path/filepath", "strings", "time", "k8s.io/apimachinery/pkg/util/wait"]
file_location: github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
code:
g.It("CPaasrunOnly-Author:ikanse-Low-61476-Collector-External Loki output complies with the tlsSecurityProfile configuration.[Slow][Disruptive]", func() {
var (
loglabeltemplate = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
g.By("Configure the global tlsSecurityProfile to use Intermediate profile")
ogTLS, er := oc.AsAdmin().WithoutNamespace().Run("get").Args("apiserver/cluster", "-o", "jsonpath={.spec.tlsSecurityProfile}").Output()
o.Expect(er).NotTo(o.HaveOccurred())
if ogTLS == "" {
ogTLS = "null"
}
ogPatch := fmt.Sprintf(`[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": %s}]`, ogTLS)
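// Capture the current tlsSecurityProfile (or "null" when unset) so the deferred patch can
// restore the original API server configuration after the test.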
defer func() {
oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", ogPatch).Execute()
waitForOperatorsRunning(oc)
}()
patch := `[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": {"intermediate":{},"type":"Intermediate"}}]`
er = oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", patch).Execute()
o.Expect(er).NotTo(o.HaveOccurred())
g.By("Make sure that all the Cluster Operators are in healthy state before progressing.")
waitForOperatorsRunning(oc)
g.By("Fetch and set the Grafana Loki credentials")
lokiUsername, lokiPassword, err := getExtLokiSecret()
o.Expect(err).NotTo(o.HaveOccurred())
lokiURL := "https://logs-prod3.grafana.net"
g.By("Create project for app logs and deploy the log generator app")
appProj := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
clfNS := oc.Namespace()
g.By("Create secret with external Grafana Loki instance credentials")
sct := resource{"secret", "loki-client", clfNS}
defer sct.clear(oc)
_, err = oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args(sct.kind, "generic", sct.name, "-n", sct.namespace, "--from-literal=username="+lokiUsername+"", "--from-literal=password="+lokiPassword+"").Output()
o.Expect(err).NotTo(o.HaveOccurred())
sct.WaitForResourceToAppear(oc)
g.By("Create ClusterLogForwarder to forward logs to the external Loki instance")
clf := clusterlogforwarder{
name: "clf-61476",
namespace: clfNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "clf-external-loki-with-secret.yaml"),
secretName: sct.name,
waitForPodReady: true,
collectApplicationLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
inputRefs := "[\"application\"]"
clf.create(oc, "LOKI_URL="+lokiURL, "INPUTREFS="+inputRefs)
g.By("The Loki sink in Vector config must use the intermediate tlsSecurityProfile")
searchString := `[sinks.output_loki_server.tls]
min_tls_version = "VersionTLS12"
ciphersuites = "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,ECDHE-ECDSA-AES128-GCM-SHA256,ECDHE-RSA-AES128-GCM-SHA256,ECDHE-ECDSA-AES256-GCM-SHA384,ECDHE-RSA-AES256-GCM-SHA384,ECDHE-ECDSA-CHACHA20-POLY1305,ECDHE-RSA-CHACHA20-POLY1305,DHE-RSA-AES128-GCM-SHA256,DHE-RSA-AES256-GCM-SHA384"`
result, err := checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", searchString)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue(), "the configuration %s is not in vector.toml", searchString)
g.By("Searching for Application Logs in Loki")
lc := newLokiClient(lokiURL).withBasicAuth(lokiUsername, lokiPassword).retry(5)
appPodName, err := oc.AdminKubeClient().CoreV1().Pods(appProj).List(context.Background(), metav1.ListOptions{LabelSelector: "run=centos-logtest"})
o.Expect(err).NotTo(o.HaveOccurred())
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
appLogs, err := lc.searchByNamespace("", appProj)
if err != nil {
return false, err
}
if appLogs.Status == "success" && appLogs.Data.Stats.Summary.BytesProcessedPerSecond != 0 && appLogs.Data.Result[0].Stream.LogType == "application" && appLogs.Data.Result[0].Stream.KubernetesPodName == appPodName.Items[0].Name {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "failed searching for application logs in Loki")
e2e.Logf("Application Logs Query is a success")
g.By("Set the Modern tlsSecurityProfile for Loki output")
patch = `[{"op": "add", "path": "/spec/outputs/0/tls", "value": {"securityProfile":{"type":"Modern"}}}]`
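// The Modern profile pins the output to TLS 1.3 and its three ciphersuites, which is what
// the searchString below checks for in the rendered vector.toml.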
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
g.By("Create project for app logs and deploy the log generator app")
oc.SetupProject()
appProj1 := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj1, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("The Loki sink in Vector config must use the Modern tlsSecurityProfile")
searchString = `[sinks.output_loki_server.tls]
min_tls_version = "VersionTLS13"
ciphersuites = "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256"`
result, err = checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", searchString)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue(), "the configuration %s is not in vector.toml", searchString)
g.By("Check for errors in collector pod logs.")
e2e.Logf("Wait for a minute before the collector logs are generated.")
time.Sleep(60 * time.Second)
collectorLogs, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", clf.namespace, "--selector=app.kubernetes.io/component=collector").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(collectorLogs, "Error trying to connect")).ShouldNot(o.BeTrue(), "Unable to connect to the external Loki server.")
g.By("Searching for Application Logs in Loki")
appPodName, err = oc.AdminKubeClient().CoreV1().Pods(appProj1).List(context.Background(), metav1.ListOptions{LabelSelector: "run=centos-logtest"})
o.Expect(err).NotTo(o.HaveOccurred())
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
appLogs, err := lc.searchByNamespace("", appProj1)
if err != nil {
return false, err
}
if appLogs.Status == "success" && appLogs.Data.Stats.Summary.BytesProcessedPerSecond != 0 && appLogs.Data.Result[0].Stream.LogType == "application" && appLogs.Data.Result[0].Stream.KubernetesPodName == appPodName.Items[0].Name {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "failed searching for application logs in Loki")
e2e.Logf("Application Logs Query is a success")
})

element_type: test case
project_name: openshift/openshift-tests-private
uuid: 4f5bb7e8-7a59-4bc4-b416-9765d77be211
name: CPaasrunOnly-ConnectedOnly-Author:ikanse-High-54523-LokiStack Cluster Logging comply with the intermediate TLS security profile when global API Server has no tlsSecurityProfile defined[Slow][Disruptive]
imports: ["context", "fmt", "os", "path/filepath", "strings"]
file_location: github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
code:
g.It("CPaasrunOnly-ConnectedOnly-Author:ikanse-High-54523-LokiStack Cluster Logging comply with the intermediate TLS security profile when global API Server has no tlsSecurityProfile defined[Slow][Disruptive]", func() {
var (
jsonLogFile = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
appProj := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Remove any tlsSecurityProfile config")
ogTLS, er := oc.AsAdmin().WithoutNamespace().Run("get").Args("apiserver/cluster", "-o", "jsonpath={.spec.tlsSecurityProfile}").Output()
o.Expect(er).NotTo(o.HaveOccurred())
if ogTLS == "" {
ogTLS = "null"
}
ogPatch := fmt.Sprintf(`[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": %s}]`, ogTLS)
defer func() {
oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", ogPatch).Execute()
waitForOperatorsRunning(oc)
}()
patch := `[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": null}]`
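// With tlsSecurityProfile cleared on the API server there is no explicit profile to honor,
// and the checks further down expect the LokiStack gateway and config to fall back to the
// Intermediate profile defaults.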
er = oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", patch).Execute()
o.Expect(er).NotTo(o.HaveOccurred())
g.By("Make sure that all the Cluster Operators are in healthy state before progressing.")
waitForOperatorsRunning(oc)
g.By("Deploying LokiStack CR for 1x.demo tshirt size")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "loki-54523",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-54523",
storageClass: sc,
bucketName: "logging-loki-54523-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-54523",
namespace: loggingNS,
serviceAccountName: "logcollector-54523",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-54523",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
g.By("checking app, audit and infra logs in loki")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
for _, logType := range []string{"application", "infrastructure", "audit"} {
lc.waitForLogsAppearByKey(logType, "log_type", logType)
}
lc.waitForLogsAppearByProject("application", appProj)
g.By("Check that the LokiStack gateway is using the Intermediate tlsSecurityProfile")
server := fmt.Sprintf("%s-gateway-http:8081", ls.name)
checkTLSProfile(oc, "intermediate", "RSA", server, "/run/secrets/kubernetes.io/serviceaccount/service-ca.crt", ls.namespace, 2)
g.By("Check the LokiStack config for the intermediate TLS security profile ciphers and TLS version")
dirname := "/tmp/" + oc.Namespace() + "-lkcnf"
defer os.RemoveAll(dirname)
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("cm/"+ls.name+"-config", "-n", ls.namespace, "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
lokiStackConf, err := os.ReadFile(dirname + "/config.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
expectedConfigs := []string{"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", "VersionTLS12"}
for i := 0; i < len(expectedConfigs); i++ {
count := strings.Count(string(lokiStackConf), expectedConfigs[i])
o.Expect(count).To(o.Equal(8), fmt.Sprintf("Unexpected number of occurrences of %s", expectedConfigs[i]))
}
g.By("Check the LokiStack pods have mounted the Loki config.yaml")
podList, err := oc.AdminKubeClient().CoreV1().Pods(ls.namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "app.kubernetes.io/managed-by=lokistack-controller"})
o.Expect(err).NotTo(o.HaveOccurred())
gatewayPod := ls.name + "-gateway-"
for _, pod := range podList.Items {
if !strings.HasPrefix(pod.Name, gatewayPod) {
output, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args("pod", pod.Name, "-n", ls.namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "/etc/loki/config/config.yaml")).Should(o.BeTrue())
vl := ls.name + "-config"
o.Expect(strings.Contains(output, vl)).Should(o.BeTrue())
}
}
})

element_type: test case
project_name: openshift/openshift-tests-private
uuid: 0bf2f5ad-5e8e-4b32-be1b-5d67f5e76605
name: CPaasrunOnly-ConnectedOnly-Author:ikanse-Medium-54525-LokiStack Cluster Logging comply with the old tlsSecurityProfile when configured in the global API server configuration[Slow][Disruptive]
imports: ["context", "fmt", "os", "path/filepath", "strings"]
file_location: github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
code:
g.It("CPaasrunOnly-ConnectedOnly-Author:ikanse-Medium-54525-LokiStack Cluster Logging comply with the old tlsSecurityProfile when configured in the global API server configuration[Slow][Disruptive]", func() {
if isFipsEnabled(oc) {
g.Skip("skip old tlsSecurityProfile on FIPS enabled cluster")
}
var (
jsonLogFile = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
appProj := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Configure the global tlsSecurityProfile to use old profile")
ogTLS, er := oc.AsAdmin().WithoutNamespace().Run("get").Args("apiserver/cluster", "-o", "jsonpath={.spec.tlsSecurityProfile}").Output()
o.Expect(er).NotTo(o.HaveOccurred())
if ogTLS == "" {
ogTLS = "null"
}
ogPatch := fmt.Sprintf(`[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": %s}]`, ogTLS)
defer func() {
oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", ogPatch).Execute()
waitForOperatorsRunning(oc)
}()
patch := `[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": {"old":{},"type":"Old"}}]`
er = oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", patch).Execute()
o.Expect(er).NotTo(o.HaveOccurred())
g.By("Make sure that all the Cluster Operators are in healthy state before progressing.")
waitForOperatorsRunning(oc)
g.By("Deploying LokiStack CR for 1x.demo tshirt size")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "loki-54525",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-54525",
storageClass: sc,
bucketName: "logging-loki-54525-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-54525",
namespace: loggingNS,
serviceAccountName: "logcollector-54525",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-54525",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
g.By("checking app, audit and infra logs in loki")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
for _, logType := range []string{"application", "infrastructure", "audit"} {
lc.waitForLogsAppearByKey(logType, "log_type", logType)
}
lc.waitForLogsAppearByProject("application", appProj)
g.By("Check that the LokiStack gateway is using the Old tlsSecurityProfile")
server := fmt.Sprintf("%s-gateway-http:8081", ls.name)
checkTLSProfile(oc, "old", "RSA", server, "/run/secrets/kubernetes.io/serviceaccount/service-ca.crt", ls.namespace, 2)
g.By("Check the LokiStack config for the Old TLS security profile ciphers and TLS version")
dirname := "/tmp/" + oc.Namespace() + "-lkcnf"
defer os.RemoveAll(dirname)
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("cm/"+ls.name+"-config", "-n", ls.namespace, "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
lokiStackConf, err := os.ReadFile(dirname + "/config.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
expectedConfigs := []string{"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_3DES_EDE_CBC_SHA", "VersionTLS10"}
for i := 0; i < len(expectedConfigs); i++ {
count := strings.Count(string(lokiStackConf), expectedConfigs[i])
o.Expect(count).To(o.Equal(8), fmt.Sprintf("Unexpected number of occurrences of %s", expectedConfigs[i]))
}
g.By("Check the LokiStack pods have mounted the Loki config.yaml")
podList, err := oc.AdminKubeClient().CoreV1().Pods(ls.namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "app.kubernetes.io/managed-by=lokistack-controller"})
o.Expect(err).NotTo(o.HaveOccurred())
gatewayPod := ls.name + "-gateway-"
for _, pod := range podList.Items {
if !strings.HasPrefix(pod.Name, gatewayPod) {
output, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args("pod", pod.Name, "-n", ls.namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "/etc/loki/config/config.yaml")).Should(o.BeTrue())
vl := ls.name + "-config"
o.Expect(strings.Contains(output, vl)).Should(o.BeTrue())
}
}
})

element_type: test case
project_name: openshift/openshift-tests-private
uuid: 54d1ca74-abf3-4e9e-a2ec-d23ef40fd12d
name: Author:ikanse-CPaasrunOnly-ConnectedOnly-Medium-54526-Forwarding to lokistack comply with the custom tlsSecurityProfile when configured in the global API server configuration[Slow][Disruptive]
imports: ["context", "fmt", "os", "path/filepath", "strings"]
file_location: github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
code:
g.It("Author:ikanse-CPaasrunOnly-ConnectedOnly-Medium-54526-Forwarding to lokistack comply with the custom tlsSecurityProfile when configured in the global API server configuration[Slow][Disruptive]", func() {
var (
jsonLogFile = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
appProj := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Configure the global tlsSecurityProfile to use custom profile")
ogTLS, er := oc.AsAdmin().WithoutNamespace().Run("get").Args("apiserver/cluster", "-o", "jsonpath={.spec.tlsSecurityProfile}").Output()
o.Expect(er).NotTo(o.HaveOccurred())
if ogTLS == "" {
ogTLS = "null"
}
ogPatch := fmt.Sprintf(`[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": %s}]`, ogTLS)
defer func() {
oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", ogPatch).Execute()
waitForOperatorsRunning(oc)
}()
patch := `[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": {"custom":{"ciphers":["ECDHE-ECDSA-CHACHA20-POLY1305","ECDHE-RSA-CHACHA20-POLY1305","ECDHE-RSA-AES128-GCM-SHA256","ECDHE-ECDSA-AES128-GCM-SHA256"],"minTLSVersion":"VersionTLS12"},"type":"Custom"}}]`
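// The custom profile is set with OpenSSL-style cipher names; the LokiStack config is later
// checked for the matching IANA names (for example ECDHE-ECDSA-CHACHA20-POLY1305 corresponds
// to TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 in expectedConfigs).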
er = oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", patch).Execute()
o.Expect(er).NotTo(o.HaveOccurred())
g.By("Make sure that all the Cluster Operators are in healthy state before progressing.")
waitForOperatorsRunning(oc)
g.By("Deploying LokiStack CR for 1x.demo tshirt size")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "loki-54526",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-54526",
storageClass: sc,
bucketName: "logging-loki-54526-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-54526",
namespace: loggingNS,
serviceAccountName: "logcollector-54526",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-54526",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
g.By("checking app, audit and infra logs in loki")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
for _, logType := range []string{"application", "infrastructure", "audit"} {
lc.waitForLogsAppearByKey(logType, "log_type", logType)
}
lc.waitForLogsAppearByProject("application", appProj)
g.By("Check that the LokiStack gateway is using the Custom tlsSecurityProfile")
server := fmt.Sprintf("%s-gateway-http:8081", ls.name)
checkTLSProfile(oc, "custom", "RSA", server, "/run/secrets/kubernetes.io/serviceaccount/service-ca.crt", ls.namespace, 2)
g.By("Check the LokiStack config for the Custom TLS security profile ciphers and TLS version")
dirname := "/tmp/" + oc.Namespace() + "-lkcnf"
defer os.RemoveAll(dirname)
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("cm/"+ls.name+"-config", "-n", ls.namespace, "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
lokiStackConf, err := os.ReadFile(dirname + "/config.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
expectedConfigs := []string{"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "VersionTLS12"}
for i := 0; i < len(expectedConfigs); i++ {
count := strings.Count(string(lokiStackConf), expectedConfigs[i])
o.Expect(count).To(o.Equal(8), fmt.Sprintf("Unexpected number of occurrences of %s", expectedConfigs[i]))
}
g.By("Check the LokiStack pods have mounted the Loki config.yaml")
podList, err := oc.AdminKubeClient().CoreV1().Pods(ls.namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "app.kubernetes.io/managed-by=lokistack-controller"})
o.Expect(err).NotTo(o.HaveOccurred())
gatewayPod := ls.name + "-gateway-"
for _, pod := range podList.Items {
if !strings.HasPrefix(pod.Name, gatewayPod) {
output, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args("pod", pod.Name, "-n", ls.namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "/etc/loki/config/config.yaml")).Should(o.BeTrue())
vl := ls.name + "-config"
o.Expect(strings.Contains(output, vl)).Should(o.BeTrue())
}
}
})

element_type: test case
project_name: openshift/openshift-tests-private
uuid: 7766b780-5413-42d7-9ed5-705ac7f39240
name: CPaasrunOnly-ConnectedOnly-Author:ikanse-Medium-54527-LokiStack Cluster Logging comply with the global tlsSecurityProfile - old to intermediate[Slow][Disruptive]
imports: ["context", "fmt", "os", "path/filepath", "strings", "time"]
file_location: github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
code:
g.It("CPaasrunOnly-ConnectedOnly-Author:ikanse-Medium-54527-LokiStack Cluster Logging comply with the global tlsSecurityProfile - old to intermediate[Slow][Disruptive]", func() {
if isFipsEnabled(oc) {
g.Skip("skip old tlsSecurityProfile on FIPS enabled cluster")
}
var (
jsonLogFile = filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
)
appProj := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Configure the global tlsSecurityProfile to use old profile")
ogTLS, er := oc.AsAdmin().WithoutNamespace().Run("get").Args("apiserver/cluster", "-o", "jsonpath={.spec.tlsSecurityProfile}").Output()
o.Expect(er).NotTo(o.HaveOccurred())
if ogTLS == "" {
ogTLS = "null"
}
ogPatch := fmt.Sprintf(`[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": %s}]`, ogTLS)
defer func() {
oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", ogPatch).Execute()
waitForOperatorsRunning(oc)
}()
patch := `[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": {"old":{},"type":"Old"}}]`
er = oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", patch).Execute()
o.Expect(er).NotTo(o.HaveOccurred())
g.By("Make sure that all the Cluster Operators are in healthy state before progressing.")
waitForOperatorsRunning(oc)
g.By("Deploying LokiStack CR for 1x.demo tshirt size")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "loki-54527",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-54527",
storageClass: sc,
bucketName: "logging-loki-54527-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-54527",
namespace: loggingNS,
serviceAccountName: "logcollector-54527",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-54527",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
g.By("checking app, audit and infra logs in loki")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
for _, logType := range []string{"application", "infrastructure", "audit"} {
lc.waitForLogsAppearByKey(logType, "log_type", logType)
}
lc.waitForLogsAppearByProject("application", appProj)
g.By("Check that the LokiStack gateway is using the Old tlsSecurityProfile")
server := fmt.Sprintf("%s-gateway-http:8081", ls.name)
checkTLSProfile(oc, "old", "RSA", server, "/run/secrets/kubernetes.io/serviceaccount/service-ca.crt", ls.namespace, 2)
g.By("Check the LokiStack config for the Old TLS security profile ciphers and TLS version")
dirname := "/tmp/" + oc.Namespace() + "-lkcnf"
defer os.RemoveAll(dirname)
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("cm/"+ls.name+"-config", "-n", ls.namespace, "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
lokiStackConf, err := os.ReadFile(dirname + "/config.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
expectedConfigs := []string{"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_3DES_EDE_CBC_SHA", "VersionTLS10"}
for i := 0; i < len(expectedConfigs); i++ {
count := strings.Count(string(lokiStackConf), expectedConfigs[i])
o.Expect(count).To(o.Equal(8), fmt.Sprintf("Unexpected number of occurrences of %s", expectedConfigs[i]))
}
g.By("Check the LokiStack pods have mounted the Loki config.yaml")
podList, err := oc.AdminKubeClient().CoreV1().Pods(ls.namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "app.kubernetes.io/managed-by=lokistack-controller"})
o.Expect(err).NotTo(o.HaveOccurred())
gatewayPod := ls.name + "-gateway-"
for _, pod := range podList.Items {
if !strings.HasPrefix(pod.Name, gatewayPod) {
output, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args("pod", pod.Name, "-n", ls.namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "/etc/loki/config/config.yaml")).Should(o.BeTrue())
vl := ls.name + "-config"
o.Expect(strings.Contains(output, vl)).Should(o.BeTrue())
}
}
g.By("Configure the global tlsSecurityProfile to use Intermediate profile")
patch = `[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": {"intermediate":{},"type":"Intermediate"}}]`
er = oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", patch).Execute()
o.Expect(er).NotTo(o.HaveOccurred())
e2e.Logf("Sleep for 3 minutes to allow LokiStack to reconcile and use the changed tlsSecurityProfile config.")
time.Sleep(3 * time.Minute)
ls.waitForLokiStackToBeReady(oc)
waitForOperatorsRunning(oc)
g.By("create a new project")
oc.SetupProject()
newAppProj := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-n", newAppProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("checking app, audit and infra logs in loki")
route = "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc = newLokiClient(route).withToken(bearerToken).retry(5)
for _, logType := range []string{"application", "infrastructure", "audit"} {
lc.waitForLogsAppearByKey(logType, "log_type", logType)
}
lc.waitForLogsAppearByProject("application", newAppProj)
g.By("Check that the LokiStack gateway is using the intermediate tlsSecurityProfile")
server = fmt.Sprintf("%s-gateway-http:8081", ls.name)
checkTLSProfile(oc, "intermediate", "RSA", server, "/run/secrets/kubernetes.io/serviceaccount/service-ca.crt", ls.namespace, 2)
g.By("Check the LokiStack config for the intermediate TLS security profile ciphers and TLS version")
os.RemoveAll(dirname)
dirname = "/tmp/" + oc.Namespace() + "-lkcnf"
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("cm/"+ls.name+"-config", "-n", ls.namespace, "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
lokiStackConf, err = os.ReadFile(dirname + "/config.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
expectedConfigs = []string{"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", "VersionTLS12"}
for i := 0; i < len(expectedConfigs); i++ {
count := strings.Count(string(lokiStackConf), expectedConfigs[i])
o.Expect(count).To(o.Equal(8), fmt.Sprintf("Unexpected number of occurrences of %s", expectedConfigs[i]))
}
g.By("Check the LokiStack pods have mounted the Loki config.yaml")
podList, err = oc.AdminKubeClient().CoreV1().Pods(ls.namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "app.kubernetes.io/managed-by=lokistack-controller"})
o.Expect(err).NotTo(o.HaveOccurred())
gatewayPod = ls.name + "-gateway-"
for _, pod := range podList.Items {
if !strings.HasPrefix(pod.Name, gatewayPod) {
output, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args("pod", pod.Name, "-n", ls.namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, "/etc/loki/config/config.yaml")).Should(o.BeTrue())
vl := ls.name + "-config"
o.Expect(strings.Contains(output, vl)).Should(o.BeTrue())
}
}
})

element_type: test case
project_name: openshift/openshift-tests-private
uuid: 4ffbc43d-1a5f-4f79-8896-0c5b59dcc20f
name: CPaasrunOnly-Author:kbharti-High-52779-High-55393-Loki Operator - Validate alert and recording rules in LokiRuler configmap and Rules API(cluster-admin)[Serial]
imports: ["path/filepath", "strings"]
file_location: github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
code:
g.It("CPaasrunOnly-Author:kbharti-High-52779-High-55393-Loki Operator - Validate alert and recording rules in LokiRuler configmap and Rules API(cluster-admin)[Serial]", func() {
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
appProj := oc.Namespace()
oc.AsAdmin().WithoutNamespace().Run("label").Args("namespace", appProj, "openshift.io/cluster-monitoring=true").Execute()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
sc, err := getStorageClassName(oc)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploying LokiStack CR")
ls := lokiStack{
name: "loki-52779",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-52779",
storageClass: sc,
bucketName: "logging-loki-52779-" + getInfrastructureName(oc),
template: filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml"),
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
g.By("Create Loki Alert and recording rules")
appAlertingTemplate := filepath.Join(loggingBaseDir, "loki-log-alerts", "loki-app-alerting-rule-template.yaml")
appAlertRule := resource{"alertingrule", "my-app-workload-alert", appProj}
defer appAlertRule.clear(oc)
err = appAlertRule.applyFromTemplate(oc, "-n", appAlertRule.namespace, "-f", appAlertingTemplate, "-p", "NAMESPACE="+appProj)
o.Expect(err).NotTo(o.HaveOccurred())
appRecordingTemplate := filepath.Join(loggingBaseDir, "loki-log-alerts", "loki-app-recording-rule-template.yaml")
appRecordRule := resource{"recordingrule", "my-app-workload-record", appProj}
defer appRecordRule.clear(oc)
err = appRecordRule.applyFromTemplate(oc, "-n", appRecordRule.namespace, "-f", appRecordingTemplate, "-p", "NAMESPACE="+appProj)
o.Expect(err).NotTo(o.HaveOccurred())
infraAlertingTemplate := filepath.Join(loggingBaseDir, "loki-log-alerts", "loki-infra-alerting-rule-template.yaml")
infraAlertRule := resource{"alertingrule", "my-infra-workload-alert", loNS}
defer infraAlertRule.clear(oc)
err = infraAlertRule.applyFromTemplate(oc, "-n", infraAlertRule.namespace, "-f", infraAlertingTemplate)
o.Expect(err).NotTo(o.HaveOccurred())
infraRecordingTemplate := filepath.Join(loggingBaseDir, "loki-log-alerts", "loki-infra-recording-rule-template.yaml")
infraRecordRule := resource{"recordingrule", "my-infra-workload-record", loNS}
defer infraRecordRule.clear(oc)
err = infraRecordRule.applyFromTemplate(oc, "-n", infraRecordRule.namespace, "-f", infraRecordingTemplate)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-52779",
namespace: loggingNS,
serviceAccountName: "logcollector-52779",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-52779",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
enableMonitoring: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
g.By("Validating loki rules configmap")
expectedRules := []string{appProj + "-my-app-workload-alert", appProj + "-my-app-workload-record", loNS + "-my-infra-workload-alert", loNS + "-my-infra-workload-record"}
rulesCM, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", ls.namespace, ls.name+"-rules-0", "-o=jsonpath={.data}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
for _, expectedRule := range expectedRules {
if !strings.Contains(string(rulesCM), expectedRule) {
g.Fail("Response is missing " + expectedRule)
}
}
e2e.Logf("Data has been validated in the rules configmap")
g.By("Querying rules API for application alerting/recording rules")
// adding cluster-admin role to a sa, but still can't query rules without `kubernetes_namespace_name=<project-name>`
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
appRules, err := lc.queryRules("application", appProj)
o.Expect(err).NotTo(o.HaveOccurred())
matchDataInResponse := []string{"name: MyAppLogVolumeAlert", "alert: MyAppLogVolumeIsHigh", "tenantId: application", "name: HighAppLogsToLoki1m", "record: loki:operator:applogs:rate1m"}
for _, matchedData := range matchDataInResponse {
if !strings.Contains(string(appRules), matchedData) {
g.Fail("Response is missing " + matchedData)
}
}
infraRules, err := lc.queryRules("infrastructure", loNS)
o.Expect(err).NotTo(o.HaveOccurred())
matchDataInResponse = []string{"name: LokiOperatorLogsHigh", "alert: LokiOperatorLogsAreHigh", "tenantId: infrastructure", "name: LokiOperatorLogsAreHigh1m", "record: loki:operator:infralogs:rate1m"}
for _, matchedData := range matchDataInResponse {
if !strings.Contains(string(infraRules), matchedData) {
g.Fail("Response is missing " + matchedData)
}
}
e2e.Logf("Rules API response validated succesfully")
})

element_type: test case
project_name: openshift/openshift-tests-private
uuid: 8400859f-0d1e-40fd-ad15-222679452d0d
name: CPaasrunOnly-Author:kbharti-Critical-55415-Loki Operator - Validate AlertManager support for cluster-monitoring is decoupled from User-workload monitoring[Serial]
imports: ["os", "path/filepath", "strings"]
file_location: github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
code:
g.It("CPaasrunOnly-Author:kbharti-Critical-55415-Loki Operator - Validate AlertManager support for cluster-monitoring is decoupled from User-workload monitoring[Serial]", func() {
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
appProj := oc.Namespace()
oc.AsAdmin().WithoutNamespace().Run("label").Args("namespace", appProj, "openshift.io/cluster-monitoring=true").Execute()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
sc, err := getStorageClassName(oc)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploying LokiStack CR")
ls := lokiStack{
name: "loki-55415",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-55415",
storageClass: sc,
bucketName: "logging-loki-55415-" + getInfrastructureName(oc),
template: filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml"),
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-55415",
namespace: loggingNS,
serviceAccountName: "logcollector-55415",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-55415",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
enableMonitoring: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
g.By("Create Loki Alert and recording rules")
alertingTemplate := filepath.Join(loggingBaseDir, "loki-log-alerts", "loki-app-alerting-rule-template.yaml")
alertRule := resource{"alertingrule", "my-app-workload-alert", appProj}
defer alertRule.clear(oc)
err = alertRule.applyFromTemplate(oc, "-n", alertRule.namespace, "-f", alertingTemplate, "-p", "NAMESPACE="+appProj)
o.Expect(err).NotTo(o.HaveOccurred())
recordingTemplate := filepath.Join(loggingBaseDir, "loki-log-alerts", "loki-app-recording-rule-template.yaml")
recordingRule := resource{"recordingrule", "my-app-workload-record", appProj}
defer recordingRule.clear(oc)
err = recordingRule.applyFromTemplate(oc, "-n", recordingRule.namespace, "-f", recordingTemplate, "-p", "NAMESPACE="+appProj)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
g.By("Validate AlertManager support for Cluster-Monitoring under openshift-monitoring")
dirname := "/tmp/" + oc.Namespace() + "-log-alerts"
defer os.RemoveAll(dirname)
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("cm/"+ls.name+"-config", "-n", ls.namespace, "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
files, err := os.ReadDir(dirname)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(files)).To(o.Equal(2)) // the lokistack config ConfigMap contains config.yaml and runtime-config.yaml
amURL := "alertmanager_url: https://_web._tcp.alertmanager-operated.openshift-monitoring.svc"
for _, file := range files {
if file.Name() == "config.yaml" {
lokiStackConf, err := os.ReadFile(dirname + "/config.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(string(lokiStackConf), amURL)).Should(o.BeTrue())
}
if file.Name() == "runtime-config.yaml" {
lokiStackConf, err := os.ReadFile(dirname + "/runtime-config.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(string(lokiStackConf), "alertmanager_url")).ShouldNot(o.BeTrue())
}
}
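// With only cluster-monitoring in play, the openshift-monitoring alertmanager_url is rendered into
// config.yaml while runtime-config.yaml carries no alertmanager_url override at all, which is what
// the assertions above check.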
g.By("Query AlertManager for Firing Alerts")
bearerToken := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
queryAlertManagerForActiveAlerts(oc, bearerToken, false, "MyAppLogVolumeIsHigh", 5)
})
| |||||
test case
|
openshift/openshift-tests-private
|
235220e3-ab02-4625-85ca-55c7e158818a
|
CPaasrunOnly-Author:kbharti-Medium-61435-Loki Operator - Validate AlertManager support for User-workload monitoring[Serial]
|
['"os"', '"path/filepath"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_loki.go
|
g.It("CPaasrunOnly-Author:kbharti-Medium-61435-Loki Operator - Validate AlertManager support for User-workload monitoring[Serial]", func() {
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
appProj := oc.Namespace()
oc.AsAdmin().WithoutNamespace().Run("label").Args("namespace", appProj, "openshift.io/cluster-monitoring=true").Execute()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
sc, err := getStorageClassName(oc)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploying LokiStack CR")
ls := lokiStack{
name: "loki-61435",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-61435",
storageClass: sc,
bucketName: "logging-loki-61435-" + getInfrastructureName(oc),
template: filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml"),
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance-61435",
namespace: loggingNS,
serviceAccountName: "logcollector-61435",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-61435",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
enableMonitoring: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
g.By("Enable User Workload Monitoring")
enableUserWorkloadMonitoringForLogging(oc)
defer deleteUserWorkloadManifests(oc)
g.By("Create Loki Alert and recording rules")
alertingTemplate := filepath.Join(loggingBaseDir, "loki-log-alerts", "loki-app-alerting-rule-template.yaml")
alertRule := resource{"alertingrule", "my-app-workload-alert", appProj}
defer alertRule.clear(oc)
err = alertRule.applyFromTemplate(oc, "-n", alertRule.namespace, "-f", alertingTemplate, "-p", "NAMESPACE="+appProj)
o.Expect(err).NotTo(o.HaveOccurred())
recordingTemplate := filepath.Join(loggingBaseDir, "loki-log-alerts", "loki-app-recording-rule-template.yaml")
recordingRule := resource{"recordingrule", "my-app-workload-record", appProj}
defer recordingRule.clear(oc)
err = recordingRule.applyFromTemplate(oc, "-n", recordingRule.namespace, "-f", recordingTemplate, "-p", "NAMESPACE="+appProj)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
g.By("Validate AlertManager support for Cluster-Monitoring under openshift-monitoring")
dirname := "/tmp/" + oc.Namespace() + "-log-alerts"
defer os.RemoveAll(dirname)
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("cm/"+ls.name+"-config", "-n", ls.namespace, "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
files, err := os.ReadDir(dirname)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(files)).To(o.Equal(2)) // the lokistack config ConfigMap contains config.yaml and runtime-config.yaml
amURL := "alertmanager_url: https://_web._tcp.alertmanager-operated.openshift-monitoring.svc"
userWorkloadAMURL := "alertmanager_url: https://_web._tcp.alertmanager-operated.openshift-user-workload-monitoring.svc"
for _, file := range files {
if file.Name() == "config.yaml" {
lokiStackConf, err := os.ReadFile(dirname + "/config.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(string(lokiStackConf), amURL)).Should(o.BeTrue())
}
if file.Name() == "runtime-config.yaml" {
lokiStackConf, err := os.ReadFile(dirname + "/runtime-config.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(string(lokiStackConf), userWorkloadAMURL)).Should(o.BeTrue())
}
}
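// With user-workload monitoring enabled, the cluster-wide alertmanager_url stays in config.yaml and
// the per-tenant override pointing at openshift-user-workload-monitoring is rendered into
// runtime-config.yaml, which is what the assertions above check.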
g.By("Query User workload AlertManager for Firing Alerts")
defer removeClusterRoleFromServiceAccount(oc, appProj, "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, appProj, "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", appProj)
//token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
queryAlertManagerForActiveAlerts(oc, bearerToken, true, "MyAppLogVolumeIsHigh", 5)
})
| |||||
test
|
openshift/openshift-tests-private
|
87ed0296-0a8c-467c-a3a0-ecad2d663294
|
vector_splunk
|
import (
"context"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_splunk.go
|
package logging
import (
"context"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-openshift-logging] Logging NonPreRelease", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("vector-splunk", exutil.KubeConfigPath())
loggingBaseDir string
)
g.Context("Log Forward to splunk", func() {
// author [email protected]
g.BeforeEach(func() {
nodes, err := oc.AdminKubeClient().CoreV1().Nodes().List(context.Background(), metav1.ListOptions{LabelSelector: "kubernetes.io/os=linux,kubernetes.io/arch=amd64"})
if err != nil || len(nodes.Items) == 0 {
g.Skip("Skip for the cluster doesn't have amd64 node")
}
loggingBaseDir = exutil.FixturePath("testdata", "logging")
g.By("deploy CLO")
CLO := SubscriptionObjects{
OperatorName: "cluster-logging-operator",
Namespace: cloNS,
PackageName: "cluster-logging",
Subscription: filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml"),
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
CLO.SubscribeOperator(oc)
})
g.It("Author:anli-CPaasrunOnly-High-54980-Vector forward logs to Splunk 9.0 over HTTP", func() {
oc.SetupProject()
splunkProject := oc.Namespace()
sp := splunkPodServer{
namespace: splunkProject,
name: "default-http",
authType: "http",
version: "9.0",
}
sp.init()
// The secret used in the CLF to connect to the Splunk server
clfSecret := toSplunkSecret{
name: "to-splunk-secret-54980",
namespace: splunkProject,
hecToken: sp.hecToken,
caFile: "",
keyFile: "",
certFile: "",
passphrase: "",
}
clf := clusterlogforwarder{
name: "clf-54980",
namespace: splunkProject,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "splunk.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
josnLogTemplate := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
g.By("Deploy splunk")
defer sp.destroy(oc)
sp.deploy(oc)
g.By("create clusterlogforwarder")
defer clfSecret.delete(oc)
clfSecret.create(oc)
defer clf.delete(oc)
clf.create(oc, "URL=http://"+sp.serviceURL+":8088", "SECRET_NAME="+clfSecret.name, "INDEX=main")
g.By("create log producer")
oc.SetupProject()
appProj := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", josnLogTemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("check logs in splunk")
o.Expect(sp.anyLogFound()).To(o.BeTrue())
})
g.It("Author:anli-CPaasrunOnly-Medium-56248-vector forward logs to splunk 8.2 over TLS - SkipVerify", func() {
oc.SetupProject()
splunkProject := oc.Namespace()
keysPath := filepath.Join("/tmp/temp" + getRandomString())
sp := splunkPodServer{
namespace: splunkProject,
name: "splunk-https",
authType: "tls_serveronly",
version: "8.2",
caFile: keysPath + "/ca.crt",
keyFile: keysPath + "/server.key",
certFile: keysPath + "/server.crt",
passphrase: "",
}
sp.init()
// The secret used in the CLF to connect to the Splunk server
clfSecret := toSplunkSecret{
name: "to-splunk-secret-56248",
namespace: splunkProject,
hecToken: sp.hecToken,
caFile: keysPath + "/fake_ca.crt",
keyFile: "",
certFile: "",
passphrase: "",
}
josnLogTemplate := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
g.By("generate fake certifate for testing")
defer exec.Command("rm", "-r", keysPath).Output()
err := os.MkdirAll(keysPath, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("cm/kube-root-ca.crt", "-n", clfSecret.namespace, "--confirm", "--to="+keysPath).Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = os.Rename(keysPath+"/ca.crt", clfSecret.caFile)
o.Expect(err).NotTo(o.HaveOccurred())
cert := certsConf{sp.serviceName, sp.namespace, sp.passphrase}
cert.generateCerts(oc, keysPath)
g.By("Deploy splunk")
defer sp.destroy(oc)
sp.deploy(oc)
g.By("create clusterlogforwarder")
clf := clusterlogforwarder{
name: "clf-56248",
namespace: splunkProject,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "splunk-serveronly.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
defer clfSecret.delete(oc)
clfSecret.create(oc)
defer clf.delete(oc)
clf.create(oc, "URL=https://"+sp.serviceURL+":8088", "SECRET_NAME="+clfSecret.name, "SKIP_VERIFY=true")
g.By("create log producer")
oc.SetupProject()
appProj := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", josnLogTemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("check logs in splunk")
o.Expect(sp.anyLogFound()).To(o.BeTrue())
})
g.It("Author:anli-CPaasrunOnly-Critical-54976-vector forward logs to splunk 9.0 over TLS - ServerOnly", func() {
oc.SetupProject()
splunkProject := oc.Namespace()
keysPath := filepath.Join("/tmp/temp" + getRandomString())
sp := splunkPodServer{
namespace: splunkProject,
name: "splunk-https",
authType: "tls_serveronly",
version: "9.0",
caFile: keysPath + "/ca.crt",
keyFile: keysPath + "/server.key",
certFile: keysPath + "/server.crt",
passphrase: "",
}
sp.init()
// The secret used in the CLF to connect to the Splunk server
clfSecret := toSplunkSecret{
name: "to-splunk-secret-55976",
namespace: splunkProject,
hecToken: sp.hecToken,
caFile: keysPath + "/ca.crt",
keyFile: "",
certFile: "",
passphrase: "",
}
josnLogTemplate := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
g.By("Generate certifcate for testing")
defer exec.Command("rm", "-r", keysPath).Output()
err := os.MkdirAll(keysPath, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
cert := certsConf{sp.serviceName, sp.namespace, sp.passphrase}
cert.generateCerts(oc, keysPath)
g.By("Deploy splunk")
defer sp.destroy(oc)
sp.deploy(oc)
g.By("create clusterlogforwarder")
clf := clusterlogforwarder{
name: "clf-55976",
namespace: splunkProject,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "splunk-serveronly.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
defer clfSecret.delete(oc)
clfSecret.create(oc)
defer clf.delete(oc)
clf.create(oc, "URL=https://"+sp.serviceURL+":8088", "SECRET_NAME="+clfSecret.name)
g.By("create log producer")
oc.SetupProject()
appProj := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", josnLogTemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("check logs in splunk")
o.Expect(sp.allTypeLogsFound()).To(o.BeTrue())
})
g.It("Author:anli-CPaasrunOnly-Medium-54978-vector forward logs to splunk 8.2 over TLS - Client Key Passphase", func() {
oc.SetupProject()
splunkProject := oc.Namespace()
keysPath := filepath.Join("/tmp/temp" + getRandomString())
sp := splunkPodServer{
namespace: splunkProject,
name: "splunk-https",
authType: "tls_clientauth",
version: "8.2",
caFile: keysPath + "/ca.crt",
keyFile: keysPath + "/server.key",
certFile: keysPath + "/server.crt",
passphrase: "aosqetmp",
}
sp.init()
clfSecret := toSplunkSecret{
name: "to-splunk-secret-54978",
namespace: splunkProject,
hecToken: sp.hecToken,
caFile: keysPath + "/ca.crt",
keyFile: keysPath + "/client.key",
certFile: keysPath + "/client.crt",
passphrase: sp.passphrase,
}
clf := clusterlogforwarder{
name: "clf-54978",
namespace: splunkProject,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "splunk-mtls-passphrase.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
josnLogTemplate := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
g.By("Generate certifcate for testing")
defer exec.Command("rm", "-r", keysPath).Output()
err := os.MkdirAll(keysPath, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
cert := certsConf{sp.serviceName, sp.namespace, sp.passphrase}
cert.generateCerts(oc, keysPath)
g.By("Deploy splunk")
defer sp.destroy(oc)
sp.deploy(oc)
g.By("create clusterlogforwarder")
defer clfSecret.delete(oc)
clfSecret.create(oc)
defer clf.delete(oc)
clf.create(oc, "URL=https://"+sp.serviceURL+":8088", "SECRET_NAME="+clfSecret.name)
g.By("create log producer")
oc.SetupProject()
appProj := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", josnLogTemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("check logs in splunk")
o.Expect(sp.anyLogFound()).To(o.BeTrue())
})
g.It("Author:anli-CPaasrunOnly-Medium-54979-vector forward logs to splunk 9.0 over TLS - ClientAuth", func() {
oc.SetupProject()
splunkProject := oc.Namespace()
keysPath := filepath.Join("/tmp/temp" + getRandomString())
sp := splunkPodServer{
namespace: splunkProject,
name: "splunk-https",
authType: "tls_clientauth",
version: "9.0",
caFile: keysPath + "/ca.crt",
keyFile: keysPath + "/server.key",
certFile: keysPath + "/server.crt",
passphrase: "",
}
sp.init()
clfSecret := toSplunkSecret{
name: "to-splunk-secret-54979",
namespace: splunkProject,
hecToken: sp.hecToken,
caFile: keysPath + "/ca.crt",
keyFile: keysPath + "/client.key",
certFile: keysPath + "/client.crt",
passphrase: "",
}
clf := clusterlogforwarder{
name: "clf-54979",
namespace: splunkProject,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "splunk-mtls.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
josnLogTemplate := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
g.By("Generate certifcate for testing")
defer exec.Command("rm", "-r", keysPath).Output()
err := os.MkdirAll(keysPath, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
cert := certsConf{sp.serviceURL, sp.namespace, ""}
cert.generateCerts(oc, keysPath)
g.By("Deploy splunk")
defer sp.destroy(oc)
sp.deploy(oc)
g.By("create clusterlogforwarder")
defer clfSecret.delete(oc)
clfSecret.create(oc)
defer clf.delete(oc)
clf.create(oc, "URL=https://"+sp.serviceURL+":8088", "SECRET_NAME="+clfSecret.name)
g.By("create log producer")
oc.SetupProject()
appProj := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", josnLogTemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("check logs in splunk")
o.Expect(sp.anyLogFound()).To(o.BeTrue())
})
})
g.Context("Splunk Custom Tenant", func() {
g.BeforeEach(func() {
nodes, err := oc.AdminKubeClient().CoreV1().Nodes().List(context.Background(), metav1.ListOptions{LabelSelector: "kubernetes.io/os=linux,kubernetes.io/arch=amd64"})
if err != nil || len(nodes.Items) == 0 {
g.Skip("Skip for the cluster doesn't have amd64 node")
}
loggingBaseDir = exutil.FixturePath("testdata", "logging")
exutil.By("deploy CLO")
CLO := SubscriptionObjects{
OperatorName: "cluster-logging-operator",
Namespace: cloNS,
PackageName: "cluster-logging",
Subscription: filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml"),
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
CLO.SubscribeOperator(oc)
})
g.It("Author:anli-CPaasrunOnly-High-71028-Forward logs to Splunk index by setting indexName", func() {
splunkProject := oc.Namespace()
sp := splunkPodServer{
namespace: splunkProject,
name: "default-http",
authType: "http",
version: "9.0",
}
sp.init()
indexName := "custom-index-" + getRandomString()
exutil.By("Deploy splunk")
defer sp.destroy(oc)
sp.deploy(oc)
errIndex := sp.createIndexes(oc, indexName)
o.Expect(errIndex).NotTo(o.HaveOccurred())
exutil.By("create log producer")
oc.SetupProject()
appProj := oc.Namespace()
josnLogTemplate := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", josnLogTemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clfSecret := toSplunkSecret{
name: "splunk-secret-71028",
namespace: splunkProject,
hecToken: sp.hecToken,
}
clf := clusterlogforwarder{
name: "clf-71028",
namespace: splunkProject,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "splunk.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
exutil.By("create clusterlogforwarder")
defer clfSecret.delete(oc)
clfSecret.create(oc)
defer clf.delete(oc)
clf.create(oc, "URL=http://"+sp.serviceURL+":8088", "SECRET_NAME="+clfSecret.name, "INDEX="+indexName)
exutil.By("check logs in splunk")
for _, logType := range []string{"application", "audit", "infrastructure"} {
o.Expect(sp.checkLogs("index=\""+indexName+"\" log_type=\""+logType+"\"")).To(o.BeTrue(), "can't find "+logType+" logs in "+indexName+" index")
r, e := sp.searchLogs("index=\"main\" log_type=\"" + logType + "\"")
o.Expect(e).NotTo(o.HaveOccurred())
o.Expect(len(r.Results) == 0).Should(o.BeTrue(), "find "+logType+" logs in default index, this is not expected")
}
})
g.It("Author:qitang-CPaasrunOnly-High-71029-Forward logs to Splunk indexes by kubernetes.namespace_name[Slow]", func() {
exutil.By("create log producer")
appProj := oc.Namespace()
josnLogTemplate := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", josnLogTemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
splunkProject := oc.Namespace()
sp := splunkPodServer{
namespace: splunkProject,
name: "default-http",
authType: "http",
version: "9.0",
}
sp.init()
exutil.By("Deploy splunk")
defer sp.destroy(oc)
sp.deploy(oc)
var indexes []string
namespaces, err := oc.AdminKubeClient().CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
for _, ns := range namespaces.Items {
if ns.Name != "default" {
indexes = append(indexes, ns.Name)
}
}
errIndex := sp.createIndexes(oc, indexes...)
o.Expect(errIndex).NotTo(o.HaveOccurred())
clfSecret := toSplunkSecret{
name: "splunk-secret-71029",
namespace: splunkProject,
hecToken: sp.hecToken,
}
clf := clusterlogforwarder{
name: "clf-71029",
namespace: splunkProject,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "splunk.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
exutil.By("create clusterlogforwarder")
defer clfSecret.delete(oc)
clfSecret.create(oc)
defer clf.delete(oc)
clf.create(oc, "URL=http://"+sp.serviceURL+":8088", "SECRET_NAME="+clfSecret.name, "INDEX={.kubernetes.namespace_name||\"\"}")
exutil.By("check logs in splunk")
// not all projects in the cluster have container logs, so only check some of them here
// container logs should only be stored in the index named after their namespace
for _, index := range []string{appProj, "openshift-cluster-version", "openshift-dns", "openshift-ingress", "openshift-monitoring"} {
o.Expect(sp.checkLogs("index=\""+index+"\"")).To(o.BeTrue(), "can't find logs in "+index+" index")
r, e := sp.searchLogs("index=\"" + index + "\" kubernetes.namespace_name!=\"" + index + "\"")
o.Expect(e).NotTo(o.HaveOccurred())
o.Expect(len(r.Results) == 0).Should(o.BeTrue(), "find logs from other namespaces in "+index+" index, this is not expected")
r, e = sp.searchLogs("index!=\"" + index + "\" kubernetes.namespace_name=\"" + index + "\"")
o.Expect(e).NotTo(o.HaveOccurred())
o.Expect(len(r.Results) == 0).Should(o.BeTrue(), "find logs from project "+index+" in other indexes, this is not expected")
}
// audit logs and journal logs should be stored in the default index, which is named main
for _, logType := range []string{"audit", "infrastructure"} {
o.Expect(sp.checkLogs("index=\"main\" log_type=\""+logType+"\"")).To(o.BeTrue(), "can't find "+logType+" logs in main index")
}
})
g.It("Author:qitang-CPaasrunOnly-High-71031-Forward logs to Splunk indexes by openshift.labels", func() {
exutil.By("create log producer")
appProj := oc.Namespace()
josnLogTemplate := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", josnLogTemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
splunkProject := oc.Namespace()
sp := splunkPodServer{
namespace: splunkProject,
name: "default-http",
authType: "http",
version: "9.0",
}
sp.init()
exutil.By("Deploy splunk")
defer sp.destroy(oc)
sp.deploy(oc)
index := "multi_splunk-indexes_71031"
errIndex := sp.createIndexes(oc, index)
o.Expect(errIndex).NotTo(o.HaveOccurred())
clfSecret := toSplunkSecret{
name: "splunk-secret-71031",
namespace: splunkProject,
hecToken: sp.hecToken,
}
clf := clusterlogforwarder{
name: "clf-71031",
namespace: splunkProject,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "splunk.yaml"),
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
exutil.By("create clusterlogforwarder")
defer clfSecret.delete(oc)
clfSecret.create(oc)
defer clf.delete(oc)
clf.create(oc, "URL=http://"+sp.serviceURL+":8088", "SECRET_NAME="+clfSecret.name, "INDEX={.openshift.labels.test||\"\"}")
patch := `[{"op": "add", "path": "/spec/filters", "value": [{"name": "labels", "type": "openshiftLabels", "openshiftLabels": {"test": "` + index + `"}}]}, {"op": "add", "path": "/spec/pipelines/0/filterRefs", "value": ["labels"]}]`
clf.update(oc, "", patch, "--type=json")
clf.waitForCollectorPodsReady(oc)
// sleep 10 seconds to give the collector pods time to send logs to splunk
time.Sleep(10 * time.Second)
exutil.By("check logs in splunk")
for _, logType := range []string{"infrastructure", "application", "audit"} {
o.Expect(sp.checkLogs("index=\""+index+"\" log_type=\""+logType+"\"")).To(o.BeTrue(), "can't find "+logType+" logs in "+index+" index")
}
for _, logType := range []string{"application", "infrastructure", "audit"} {
r, e := sp.searchLogs("index=\"main\" log_type=\"" + logType + "\"")
o.Expect(e).NotTo(o.HaveOccurred())
o.Expect(len(r.Results) == 0).Should(o.BeTrue(), "find "+logType+" logs in default index, this is not expected")
}
})
g.It("Author:qitang-CPaasrunOnly-Medium-71035-Forward logs to Splunk indexes by kubernetes.labels", func() {
exutil.By("create log producer")
appProj := oc.Namespace()
josnLogTemplate := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", josnLogTemplate, "-p", `LABELS={"test-logging": "logging-OCP_71035", "test.logging.io/logging.qe-test-label": "logging-OCP_71035"}`).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
splunkProject := oc.Namespace()
sp := splunkPodServer{
namespace: splunkProject,
name: "default-http",
authType: "http",
version: "9.0",
}
sp.init()
exutil.By("Deploy splunk")
defer sp.destroy(oc)
sp.deploy(oc)
index := "logging-OCP_71035"
errIndex := sp.createIndexes(oc, index)
o.Expect(errIndex).NotTo(o.HaveOccurred())
clfSecret := toSplunkSecret{
name: "splunk-secret-71035",
namespace: splunkProject,
hecToken: sp.hecToken,
}
clf := clusterlogforwarder{
name: "clf-71035",
namespace: splunkProject,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "splunk.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
exutil.By("create clusterlogforwarder")
defer clfSecret.delete(oc)
clfSecret.create(oc)
defer clf.delete(oc)
clf.create(oc, "URL=http://"+sp.serviceURL+":8088", "SECRET_NAME="+clfSecret.name, "INDEX={.kubernetes.labels.\"test.logging.io/logging.qe-test-label\"||\"\"}")
exutil.By("check logs in splunk")
// logs from project appProj should be stored in 'logging-OCP_71035', all other logs should go to the default index
o.Expect(sp.checkLogs("index=\""+index+"\"")).To(o.BeTrue(), "can't find logs in "+index+" index")
r, e := sp.searchLogs("index=\"" + index + "\", kubernetes.namespace_name!=\"" + appProj + "\"")
o.Expect(e).NotTo(o.HaveOccurred())
o.Expect(len(r.Results) == 0).Should(o.BeTrue(), "find logs from other namespaces in "+index+" index, this is not expected")
r, e = sp.searchLogs("index!=\"" + index + "\", kubernetes.namespace_name=\"" + appProj + "\"")
o.Expect(e).NotTo(o.HaveOccurred())
o.Expect(len(r.Results) == 0).Should(o.BeTrue(), "find logs from project "+appProj+" in other indexes, this is not expected")
for _, logType := range []string{"audit", "infrastructure"} {
o.Expect(sp.checkLogs("index=\"main\", log_type=\""+logType+"\"")).To(o.BeTrue(), "can't find "+logType+" logs in main index")
r, e := sp.searchLogs("index=\"" + index + "\", log_type=\"" + logType + "\"")
o.Expect(e).NotTo(o.HaveOccurred())
o.Expect(len(r.Results) == 0).Should(o.BeTrue(), "find "+logType+" logs in "+index+" index, this is not expected")
}
})
g.It("Author:anli-CPaasrunOnly-High-75234-logs fallback to default splunk index if template syntax can not be found", func() {
exutil.By("create log producer")
appProj := oc.Namespace()
josnLogTemplate := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", josnLogTemplate, "-p", "LABELS={\"test-logging\": \"logging-OCP-71322\"}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
splunkProject := oc.Namespace()
sp := splunkPodServer{
namespace: splunkProject,
name: "default-http",
authType: "http",
version: "9.0",
}
sp.init()
exutil.By("Deploy splunk")
defer sp.destroy(oc)
sp.deploy(oc)
o.Expect(sp.createIndexes(oc, appProj)).NotTo(o.HaveOccurred())
o.Expect(sp.createIndexes(oc, "openshift-operator-lifecycle-manager")).NotTo(o.HaveOccurred())
clfSecret := toSplunkSecret{
name: "splunk-secret-71322",
namespace: splunkProject,
hecToken: sp.hecToken,
}
clf := clusterlogforwarder{
name: "clf-71322",
namespace: splunkProject,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "splunk.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
exutil.By("create clusterlogforwarder")
defer clfSecret.delete(oc)
clfSecret.create(oc)
defer clf.delete(oc)
clf.create(oc, "URL=http://"+sp.serviceURL+":8088", "SECRET_NAME="+clfSecret.name, "INDEX={.kubernetes.namespace_name||\"main\"}")
exutil.By("verify logs can are found in namespace_name index")
allFound := true
for _, logIndex := range []string{appProj, "openshift-operator-lifecycle-manager"} {
if sp.checkLogs("index=" + logIndex) {
e2e.Logf("found logs in index %s", logIndex)
} else {
e2e.Logf("can not find logs in index %s", logIndex)
allFound = false
}
}
o.Expect(allFound).To(o.BeTrue(), "can't find some logs in namespace_name index ")
exutil.By("verify infra and audit logs are send to main index")
allFound = true
for _, logType := range []string{"audit", "infrastructure"} {
if sp.checkLogs(`index="main" log_type="` + logType + `"`) {
e2e.Logf("found logs %s in index main", logType)
} else {
e2e.Logf("Can not find logs %s in index main ", logType)
allFound = false
}
}
o.Expect(allFound).To(o.BeTrue(), "can't find some type of logs in main index")
})
g.It("Author:anli-CPaasrunOnly-Critical-68303-mCLF Inputs.receiver.http multiple Inputs.receivers to splunk", func() {
clfNS := oc.Namespace()
splunkProject := clfNS
g.By("Deploy splunk server")
//define splunk deployment
sp := splunkPodServer{
namespace: splunkProject,
name: "splunk-http",
authType: "http",
version: "9.0",
}
sp.init()
defer sp.destroy(oc)
sp.deploy(oc)
g.By("create clusterlogforwarder/instance")
// The secret used in the CLF to connect to the Splunk server
clfSecret := toSplunkSecret{
name: "to-splunk-secret-68303",
namespace: clfNS,
hecToken: sp.hecToken,
caFile: "",
keyFile: "",
certFile: "",
passphrase: "",
}
defer clfSecret.delete(oc)
clfSecret.create(oc)
clf := clusterlogforwarder{
name: "http-to-splunk",
namespace: clfNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "httpserver-to-splunk.yaml"),
secretName: clfSecret.name,
serviceAccountName: "clf-" + getRandomString(),
waitForPodReady: true,
collectAuditLogs: false,
collectApplicationLogs: false,
collectInfrastructureLogs: false,
}
defer clf.delete(oc)
clf.create(oc, "URL=http://"+sp.serviceURL+":8088")
g.By("send data to httpservers")
o.Expect(postDataToHttpserver(oc, clfNS, "https://"+clf.name+"-httpserver1."+clfNS+".svc:8081", `{"test data" : "from httpserver1"}`)).To(o.BeTrue())
o.Expect(postDataToHttpserver(oc, clfNS, "https://"+clf.name+"-httpserver2."+clfNS+".svc:8082", `{"test data" : "from httpserver2"}`)).To(o.BeTrue())
o.Expect(postDataToHttpserver(oc, clfNS, "https://"+clf.name+"-httpserver3."+clfNS+".svc:8083", `{"test data" : "from httpserver3"}`)).To(o.BeTrue())
g.By("check logs in splunk")
o.Expect(sp.auditLogFound()).To(o.BeTrue())
})
g.It("Author:anli-CPaasrunOnly-Medium-75386-ClusterLogForwarder input validation testing.", func() {
splunkProject := oc.Namespace()
sp := splunkPodServer{
namespace: splunkProject,
name: "default-http",
authType: "http",
version: "9.0",
}
sp.init()
exutil.By("Deploy splunk")
defer sp.destroy(oc)
sp.deploy(oc)
clfSecret := toSplunkSecret{
name: "splunk-secret-75386",
namespace: splunkProject,
hecToken: sp.hecToken,
}
clf := clusterlogforwarder{
name: "clf-75386",
namespace: splunkProject,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "splunk.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
exutil.By("create clusterlogforwarder")
defer clfSecret.delete(oc)
clfSecret.create(oc)
defer clf.delete(oc)
clf.create(oc, "URL=http://"+sp.serviceURL+":8088", "SECRET_NAME="+clfSecret.name, "INDEX={.kubernetes.non_existing.key||\"\"}")
exutil.By("update CLF to set invalid glob for namespace")
patch := `[{"op":"add","path":"/spec/inputs","value":[{"name":"new-app","type":"application","application":{"excludes":[{"namespace":"invalid-name@"}],"includes":[{"namespace":"tes*t"}]}}]},{"op":"replace","path":"/spec/pipelines/0/inputRefs","value":["new-app"]}]`
clf.update(oc, "", patch, "--type=json")
checkResource(oc, true, false, "globs must match", []string{"clusterlogforwarder.observability.openshift.io", clf.name, "-n", clf.namespace, "-ojsonpath={.status.inputConditions[0].message}"})
exutil.By("update CLF to set invalid sources for infrastructure logs")
patch = `[{"op":"replace","path":"/spec/inputs","value":[{"name":"selected-infra","type":"infrastructure","infrastructure":{"sources":["nodesd","containersf"]}}]},{"op":"replace","path":"/spec/pipelines/0/inputRefs","value":["selected-infra"]}]`
outString, _ := clf.patch(oc, patch)
o.Expect(strings.Contains(outString, `The ClusterLogForwarder "clf-75386" is invalid`)).To(o.BeTrue())
o.Expect(strings.Contains(outString, `Unsupported value: "nodesd": supported values: "container", "node"`)).To(o.BeTrue())
o.Expect(strings.Contains(outString, `Unsupported value: "containersf": supported values: "container", "node"`)).To(o.BeTrue())
exutil.By("update CLF to set invalid sources for audit logs")
patch = `[{"op":"replace","path":"/spec/pipelines/0/inputRefs","value":["selected-audit"]},{"op":"replace","path":"/spec/inputs","value":[{"name":"selected-audit","type":"audit","audit":{"sources":["nodess","containersf"]}}]}]`
outString, _ = clf.patch(oc, patch)
o.Expect(strings.Contains(outString, `The ClusterLogForwarder "clf-75386" is invalid`)).To(o.BeTrue())
o.Expect(strings.Contains(outString, `Unsupported value: "nodess": supported values: "auditd", "kubeAPI", "openshiftAPI", "ovn"`)).To(o.BeTrue())
o.Expect(strings.Contains(outString, `Unsupported value: "containersf": supported values: "auditd", "kubeAPI", "openshiftAPI", "ovn"`)).To(o.BeTrue())
exutil.By("update CLF to use string as matchExpressions values")
patch = `[{"op":"replace","path":"/spec/inputs/0/application","value":{"selector":{"matchExpressions":[{"key":"test.logging.io/logging.qe-test-label","operator":"Exists","values":"logging-71749-test-1"}]}}}]`
outString, _ = clf.patch(oc, patch)
o.Expect(strings.Contains(outString, `The ClusterLogForwarder "clf-75386" is invalid`)).To(o.BeTrue())
o.Expect(strings.Contains(outString, `spec.inputs[0].application.selector.matchExpressions[0].values: Invalid value: "string"`)).To(o.BeTrue())
o.Expect(strings.Contains(outString, `spec.inputs[0].application.selector.matchExpressions[0].values in body must be of type array: "string"`)).To(o.BeTrue())
})
g.It("Author:qitang-CPaasrunOnly-Medium-75390-CLF should be rejected and show error message if the filters are invalid", func() {
splunkProject := oc.Namespace()
sp := splunkPodServer{
namespace: splunkProject,
name: "default-http",
authType: "http",
version: "9.0",
}
sp.init()
exutil.By("Deploy splunk")
defer sp.destroy(oc)
sp.deploy(oc)
clfSecret := toSplunkSecret{
name: "splunk-secret-75390",
namespace: splunkProject,
hecToken: sp.hecToken,
}
clf := clusterlogforwarder{
name: "clf-75390",
namespace: splunkProject,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "splunk.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
exutil.By("create clusterlogforwarder")
defer clfSecret.delete(oc)
clfSecret.create(oc)
defer clf.delete(oc)
clf.create(oc, "URL=http://"+sp.serviceURL+":8088", "SECRET_NAME="+clfSecret.name)
exutil.By("verfy clf without drop spec is rejected")
patch := `[{"op":"add","path":"/spec/filters","value":[{"name":"drop-logs","type":"drop"}]},{"op":"add","path":"/spec/pipelines/0/filterRefs","value":["drop-logs"]}]`
outString, _ := clf.patch(oc, patch)
o.Expect(strings.Contains(outString, `The ClusterLogForwarder "clf-75390" is invalid:`)).To(o.BeTrue())
o.Expect(strings.Contains(outString, `invalid: spec.filters[0]: Invalid value: "object": Additional type specific spec is required for the filter type`)).To(o.BeTrue())
exutil.By("verfy clf with invalid drop fileds is rejected")
patch = `[{"op":"add","path":"/spec/filters","value":[{"name":"drop-logs","type":"drop","drop":[{"test":[{"field":".kubernetes.labels.test.logging.io/logging.qe-test-label","matches":".+"}]}]}]},{"op":"add","path":"/spec/pipelines/0/filterRefs","value":["drop-logs"]}]`
outString, _ = clf.patch(oc, patch)
o.Expect(strings.Contains(outString, `The ClusterLogForwarder "clf-75390" is invalid:`)).To(o.BeTrue())
o.Expect(strings.Contains(outString, `spec.filters[0].drop[0].test[0].field: Invalid value: ".kubernetes.labels.test.logging.io/logging.qe-test-label"`)).To(o.BeTrue())
o.Expect(strings.Contains(outString, `spec.filters[0].drop[0].test[0].field in body should match '^(\.[a-zA-Z0-9_]+|\."[^"]+")(\.[a-zA-Z0-9_]+|\."[^"]+")*$`)).To(o.BeTrue())
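// The unquoted key fails the field-path pattern quoted in the error above; quoting the label key as a
// single segment, e.g. .kubernetes.labels."test.logging.io/logging.qe-test-label", would satisfy
// `^(\.[a-zA-Z0-9_]+|\."[^"]+")(\.[a-zA-Z0-9_]+|\."[^"]+")*$`.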
exutil.By("verify CLF without prune spec is rejected")
patch = `[{"op":"add","path":"/spec/filters", "value": [{"name": "prune-logs", "type": "prune"}]},{"op":"add","path":"/spec/pipelines/0/filterRefs","value":["prune-logs"]}]`
outString, _ = clf.patch(oc, patch)
o.Expect(strings.Contains(outString, `The ClusterLogForwarder "clf-75390" is invalid:`)).To(o.BeTrue())
o.Expect(strings.Contains(outString, ` Invalid value: "object": Additional type specific spec is required for the filter type`)).To(o.BeTrue())
exutil.By("verify CLF with invalid prune value is rejected")
patch = `[{"op":"add","path":"/spec/filters","value":[{"name":"prune-logs","type":"prune","prune":{"in":[".kubernetes.namespace_labels.pod-security.kubernetes.io/audit",".file",".kubernetes.annotations"]}}]},{"op":"add","path":"/spec/pipelines/0/filterRefs","value":["prune-logs"]}]`
outString, _ = clf.patch(oc, patch)
o.Expect(strings.Contains(outString, `The ClusterLogForwarder "clf-75390" is invalid:`)).To(o.BeTrue())
o.Expect(strings.Contains(outString, `Invalid value: ".kubernetes.namespace_labels.pod-security.kubernetes.io/audit"`)).To(o.BeTrue())
o.Expect(strings.Contains(outString, `body should match '^(\.[a-zA-Z0-9_]+|\."[^"]+")(\.[a-zA-Z0-9_]+|\."[^"]+")*$'`)).To(o.BeTrue())
exutil.By("verify filtersStatus show error when prune fields include .log_type, .message or .log_source")
patch = `[{"op":"add","path":"/spec/filters","value":[{"name":"prune-logs","prune":{"in":[".log_type",".message",".log_source"]},"type":"prune"}]},{"op":"add","path":"/spec/pipelines/0/filterRefs","value":["prune-logs"]}]`
clf.update(oc, "", patch, "--type=json")
checkResource(oc, true, false, `prune-logs: [[".log_type" ".message" ".log_source"] is/are required fields and must be removed from the`+" `in` list.]", []string{"clusterlogforwarder.observability.openshift.io", clf.name, "-n", clf.namespace, "-ojsonpath={.status.filterConditions[0].message}"})
patch = `[{"op":"replace","path":"/spec/filters","value":[{"name":"prune-logs","prune":{"notIn":[".kubernetes",".\"@timestamp\"",".openshift",".hostname"]},"type":"prune"}]}]`
clf.update(oc, "", patch, "--type=json")
checkResource(oc, true, false, `prune-logs: [[".log_source" ".log_type" ".message"] is/are required fields and must be included in`+" the `notIn` list.]", []string{"clusterlogforwarder.observability.openshift.io", clf.name, "-n", clf.namespace, "-ojsonpath={.status.filterConditions[0].message}"})
})
})
})
|
package logging
| ||||
test case
|
openshift/openshift-tests-private
|
a6c82c78-7c28-47c9-a57c-54ff0c856a12
|
Author:anli-CPaasrunOnly-High-54980-Vector forward logs to Splunk 9.0 over HTTP
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_splunk.go
|
g.It("Author:anli-CPaasrunOnly-High-54980-Vector forward logs to Splunk 9.0 over HTTP", func() {
oc.SetupProject()
splunkProject := oc.Namespace()
sp := splunkPodServer{
namespace: splunkProject,
name: "default-http",
authType: "http",
version: "9.0",
}
sp.init()
// The secret used in the CLF to connect to the Splunk server
clfSecret := toSplunkSecret{
name: "to-splunk-secret-54980",
namespace: splunkProject,
hecToken: sp.hecToken,
caFile: "",
keyFile: "",
certFile: "",
passphrase: "",
}
clf := clusterlogforwarder{
name: "clf-54980",
namespace: splunkProject,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "splunk.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
josnLogTemplate := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
g.By("Deploy splunk")
defer sp.destroy(oc)
sp.deploy(oc)
g.By("create clusterlogforwarder")
defer clfSecret.delete(oc)
clfSecret.create(oc)
defer clf.delete(oc)
clf.create(oc, "URL=http://"+sp.serviceURL+":8088", "SECRET_NAME="+clfSecret.name, "INDEX=main")
g.By("create log producer")
oc.SetupProject()
appProj := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", josnLogTemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("check logs in splunk")
o.Expect(sp.anyLogFound()).To(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
fe8d1d70-77c0-499c-9898-71a23994e2c4
|
Author:anli-CPaasrunOnly-Medium-56248-vector forward logs to splunk 8.2 over TLS - SkipVerify
|
['"os"', '"os/exec"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_splunk.go
|
g.It("Author:anli-CPaasrunOnly-Medium-56248-vector forward logs to splunk 8.2 over TLS - SkipVerify", func() {
oc.SetupProject()
splunkProject := oc.Namespace()
keysPath := filepath.Join("/tmp/temp" + getRandomString())
sp := splunkPodServer{
namespace: splunkProject,
name: "splunk-https",
authType: "tls_serveronly",
version: "8.2",
caFile: keysPath + "/ca.crt",
keyFile: keysPath + "/server.key",
certFile: keysPath + "/server.crt",
passphrase: "",
}
sp.init()
// The secret used in the CLF to connect to the Splunk server
clfSecret := toSplunkSecret{
name: "to-splunk-secret-56248",
namespace: splunkProject,
hecToken: sp.hecToken,
caFile: keysPath + "/fake_ca.crt",
keyFile: "",
certFile: "",
passphrase: "",
}
josnLogTemplate := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
g.By("generate fake certifate for testing")
defer exec.Command("rm", "-r", keysPath).Output()
err := os.MkdirAll(keysPath, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("cm/kube-root-ca.crt", "-n", clfSecret.namespace, "--confirm", "--to="+keysPath).Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = os.Rename(keysPath+"/ca.crt", clfSecret.caFile)
o.Expect(err).NotTo(o.HaveOccurred())
cert := certsConf{sp.serviceName, sp.namespace, sp.passphrase}
cert.generateCerts(oc, keysPath)
g.By("Deploy splunk")
defer sp.destroy(oc)
sp.deploy(oc)
g.By("create clusterlogforwarder")
clf := clusterlogforwarder{
name: "clf-56248",
namespace: splunkProject,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "splunk-serveronly.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
defer clfSecret.delete(oc)
clfSecret.create(oc)
defer clf.delete(oc)
clf.create(oc, "URL=https://"+sp.serviceURL+":8088", "SECRET_NAME="+clfSecret.name, "SKIP_VERIFY=true")
g.By("create log producer")
oc.SetupProject()
appProj := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", josnLogTemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("check logs in splunk")
o.Expect(sp.anyLogFound()).To(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
7dd1a9bc-f909-4daf-9698-767142698139
|
Author:anli-CPaasrunOnly-Critical-54976-vector forward logs to splunk 9.0 over TLS - ServerOnly
|
['"os"', '"os/exec"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_splunk.go
|
g.It("Author:anli-CPaasrunOnly-Critical-54976-vector forward logs to splunk 9.0 over TLS - ServerOnly", func() {
oc.SetupProject()
splunkProject := oc.Namespace()
keysPath := filepath.Join("/tmp/temp" + getRandomString())
sp := splunkPodServer{
namespace: splunkProject,
name: "splunk-https",
authType: "tls_serveronly",
version: "9.0",
caFile: keysPath + "/ca.crt",
keyFile: keysPath + "/server.key",
certFile: keysPath + "/server.crt",
passphrase: "",
}
sp.init()
// The secret used in the CLF to connect to the Splunk server
clfSecret := toSplunkSecret{
name: "to-splunk-secret-55976",
namespace: splunkProject,
hecToken: sp.hecToken,
caFile: keysPath + "/ca.crt",
keyFile: "",
certFile: "",
passphrase: "",
}
josnLogTemplate := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
g.By("Generate certifcate for testing")
defer exec.Command("rm", "-r", keysPath).Output()
err := os.MkdirAll(keysPath, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
cert := certsConf{sp.serviceName, sp.namespace, sp.passphrase}
cert.generateCerts(oc, keysPath)
g.By("Deploy splunk")
defer sp.destroy(oc)
sp.deploy(oc)
g.By("create clusterlogforwarder")
clf := clusterlogforwarder{
name: "clf-55976",
namespace: splunkProject,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "splunk-serveronly.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
defer clfSecret.delete(oc)
clfSecret.create(oc)
defer clf.delete(oc)
clf.create(oc, "URL=https://"+sp.serviceURL+":8088", "SECRET_NAME="+clfSecret.name)
g.By("create log producer")
oc.SetupProject()
appProj := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", josnLogTemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("check logs in splunk")
o.Expect(sp.allTypeLogsFound()).To(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
7df2bc35-a207-4171-b326-915ec0ff1a6c
|
Author:anli-CPaasrunOnly-Medium-54978-vector forward logs to splunk 8.2 over TLS - Client Key Passphase
|
['"os"', '"os/exec"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_splunk.go
|
g.It("Author:anli-CPaasrunOnly-Medium-54978-vector forward logs to splunk 8.2 over TLS - Client Key Passphase", func() {
oc.SetupProject()
splunkProject := oc.Namespace()
keysPath := filepath.Join("/tmp/temp" + getRandomString())
sp := splunkPodServer{
namespace: splunkProject,
name: "splunk-https",
authType: "tls_clientauth",
version: "8.2",
caFile: keysPath + "/ca.crt",
keyFile: keysPath + "/server.key",
certFile: keysPath + "/server.crt",
passphrase: "aosqetmp",
}
sp.init()
clfSecret := toSplunkSecret{
name: "to-splunk-secret-54978",
namespace: splunkProject,
hecToken: sp.hecToken,
caFile: keysPath + "/ca.crt",
keyFile: keysPath + "/client.key",
certFile: keysPath + "/client.crt",
passphrase: sp.passphrase,
}
clf := clusterlogforwarder{
name: "clf-54978",
namespace: splunkProject,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "splunk-mtls-passphrase.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
josnLogTemplate := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
g.By("Generate certifcate for testing")
defer exec.Command("rm", "-r", keysPath).Output()
err := os.MkdirAll(keysPath, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
cert := certsConf{sp.serviceName, sp.namespace, sp.passphrase}
cert.generateCerts(oc, keysPath)
g.By("Deploy splunk")
defer sp.destroy(oc)
sp.deploy(oc)
g.By("create clusterlogforwarder")
defer clfSecret.delete(oc)
clfSecret.create(oc)
defer clf.delete(oc)
clf.create(oc, "URL=https://"+sp.serviceURL+":8088", "SECRET_NAME="+clfSecret.name)
g.By("create log producer")
oc.SetupProject()
appProj := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", josnLogTemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("check logs in splunk")
o.Expect(sp.anyLogFound()).To(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
ece49b9d-8fdf-4b6f-b64f-e2881c861821
|
Author:anli-CPaasrunOnly-Medium-54979-vector forward logs to splunk 9.0 over TLS - ClientAuth
|
['"os"', '"os/exec"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_splunk.go
|
g.It("Author:anli-CPaasrunOnly-Medium-54979-vector forward logs to splunk 9.0 over TLS - ClientAuth", func() {
oc.SetupProject()
splunkProject := oc.Namespace()
keysPath := filepath.Join("/tmp/temp" + getRandomString())
sp := splunkPodServer{
namespace: splunkProject,
name: "splunk-https",
authType: "tls_clientauth",
version: "9.0",
caFile: keysPath + "/ca.crt",
keyFile: keysPath + "/server.key",
certFile: keysPath + "/server.crt",
passphrase: "",
}
sp.init()
clfSecret := toSplunkSecret{
name: "to-splunk-secret-54979",
namespace: splunkProject,
hecToken: sp.hecToken,
caFile: keysPath + "/ca.crt",
keyFile: keysPath + "/client.key",
certFile: keysPath + "/client.crt",
passphrase: "",
}
clf := clusterlogforwarder{
name: "clf-54979",
namespace: splunkProject,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "splunk-mtls.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
josnLogTemplate := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
g.By("Generate certifcate for testing")
defer exec.Command("rm", "-r", keysPath).Output()
err := os.MkdirAll(keysPath, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
cert := certsConf{sp.serviceURL, sp.namespace, ""}
cert.generateCerts(oc, keysPath)
g.By("Deploy splunk")
defer sp.destroy(oc)
sp.deploy(oc)
g.By("create clusterlogforwarder")
defer clfSecret.delete(oc)
clfSecret.create(oc)
defer clf.delete(oc)
clf.create(oc, "URL=https://"+sp.serviceURL+":8088", "SECRET_NAME="+clfSecret.name)
g.By("create log producer")
oc.SetupProject()
appProj := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", josnLogTemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("check logs in splunk")
o.Expect(sp.anyLogFound()).To(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
85706f9e-bee3-4d8d-aa07-b6071e5f512f
|
Author:anli-CPaasrunOnly-High-71028-Forward logs to Splunk index by setting indexName
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_splunk.go
|
g.It("Author:anli-CPaasrunOnly-High-71028-Forward logs to Splunk index by setting indexName", func() {
splunkProject := oc.Namespace()
sp := splunkPodServer{
namespace: splunkProject,
name: "default-http",
authType: "http",
version: "9.0",
}
sp.init()
indexName := "custom-index-" + getRandomString()
exutil.By("Deploy splunk")
defer sp.destroy(oc)
sp.deploy(oc)
errIndex := sp.createIndexes(oc, indexName)
o.Expect(errIndex).NotTo(o.HaveOccurred())
exutil.By("create log producer")
oc.SetupProject()
appProj := oc.Namespace()
josnLogTemplate := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", josnLogTemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clfSecret := toSplunkSecret{
name: "splunk-secret-71028",
namespace: splunkProject,
hecToken: sp.hecToken,
}
clf := clusterlogforwarder{
name: "clf-71028",
namespace: splunkProject,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "splunk.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
exutil.By("create clusterlogforwarder")
defer clfSecret.delete(oc)
clfSecret.create(oc)
defer clf.delete(oc)
clf.create(oc, "URL=http://"+sp.serviceURL+":8088", "SECRET_NAME="+clfSecret.name, "INDEX="+indexName)
exutil.By("check logs in splunk")
for _, logType := range []string{"application", "audit", "infrastructure"} {
o.Expect(sp.checkLogs("index=\""+indexName+"\" log_type=\""+logType+"\"")).To(o.BeTrue(), "can't find "+logType+" logs in "+indexName+" index")
r, e := sp.searchLogs("index=\"main\" log_type=\"" + logType + "\"")
o.Expect(e).NotTo(o.HaveOccurred())
o.Expect(len(r.Results) == 0).Should(o.BeTrue(), "find "+logType+" logs in default index, this is not expected")
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
9ba8be74-dded-4705-9e1a-eebbae016a47
|
Author:qitang-CPaasrunOnly-High-71029-Forward logs to Splunk indexes by kubernetes.namespace_name[Slow]
|
['"context"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_splunk.go
|
g.It("Author:qitang-CPaasrunOnly-High-71029-Forward logs to Splunk indexes by kubernetes.namespace_name[Slow]", func() {
exutil.By("create log producer")
appProj := oc.Namespace()
josnLogTemplate := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", josnLogTemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
splunkProject := oc.Namespace()
sp := splunkPodServer{
namespace: splunkProject,
name: "default-http",
authType: "http",
version: "9.0",
}
sp.init()
exutil.By("Deploy splunk")
defer sp.destroy(oc)
sp.deploy(oc)
var indexes []string
namespaces, err := oc.AdminKubeClient().CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
for _, ns := range namespaces.Items {
if ns.Name != "default" {
indexes = append(indexes, ns.Name)
}
}
errIndex := sp.createIndexes(oc, indexes...)
o.Expect(errIndex).NotTo(o.HaveOccurred())
clfSecret := toSplunkSecret{
name: "splunk-secret-71029",
namespace: splunkProject,
hecToken: sp.hecToken,
}
clf := clusterlogforwarder{
name: "clf-71029",
namespace: splunkProject,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "splunk.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
exutil.By("create clusterlogforwarder")
defer clfSecret.delete(oc)
clfSecret.create(oc)
defer clf.delete(oc)
clf.create(oc, "URL=http://"+sp.serviceURL+":8088", "SECRET_NAME="+clfSecret.name, "INDEX={.kubernetes.namespace_name||\"\"}")
exutil.By("check logs in splunk")
// not all projects in the cluster have container logs, so only check some of them here
// container logs should only be stored in the index named after their namespace
for _, index := range []string{appProj, "openshift-cluster-version", "openshift-dns", "openshift-ingress", "openshift-monitoring"} {
o.Expect(sp.checkLogs("index=\""+index+"\"")).To(o.BeTrue(), "can't find logs in "+index+" index")
r, e := sp.searchLogs("index=\"" + index + "\" kubernetes.namespace_name!=\"" + index + "\"")
o.Expect(e).NotTo(o.HaveOccurred())
o.Expect(len(r.Results) == 0).Should(o.BeTrue(), "find logs from other namespaces in "+index+" index, this is not expected")
r, e = sp.searchLogs("index!=\"" + index + "\" kubernetes.namespace_name=\"" + index + "\"")
o.Expect(e).NotTo(o.HaveOccurred())
o.Expect(len(r.Results) == 0).Should(o.BeTrue(), "find logs from project "+index+" in other indexes, this is not expected")
}
// audit logs and journal logs should be stored in the default index, which is named main
for _, logType := range []string{"audit", "infrastructure"} {
o.Expect(sp.checkLogs("index=\"main\" log_type=\""+logType+"\"")).To(o.BeTrue(), "can't find "+logType+" logs in main index")
}
})
| |||||
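The three Splunk searches issued in the loop above encode the routing contract: records must appear in the index named after their namespace, and nowhere else. A small illustrative helper (not part of the test, shown only to make the query shapes explicit) could build them like this:
// buildIndexQueries returns the three searches used to verify namespace-to-index routing.
func buildIndexQueries(index string) (inIndex, strayNamespace, strayIndex string) {
// logs present in the expected index
inIndex = `index="` + index + `"`
// logs in this index that belong to a different namespace (should return no results)
strayNamespace = `index="` + index + `" kubernetes.namespace_name!="` + index + `"`
// logs from this namespace that landed in another index (should return no results)
strayIndex = `index!="` + index + `" kubernetes.namespace_name="` + index + `"`
return
}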
test case
|
openshift/openshift-tests-private
|
ce8f4c17-884d-47eb-9f60-245201a7f72f
|
Author:qitang-CPaasrunOnly-High-71031-Forward logs to Splunk indexes by openshift.labels
|
['"path/filepath"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_splunk.go
|
g.It("Author:qitang-CPaasrunOnly-High-71031-Forward logs to Splunk indexes by openshift.labels", func() {
exutil.By("create log producer")
appProj := oc.Namespace()
josnLogTemplate := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", josnLogTemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
splunkProject := oc.Namespace()
sp := splunkPodServer{
namespace: splunkProject,
name: "default-http",
authType: "http",
version: "9.0",
}
sp.init()
exutil.By("Deploy splunk")
defer sp.destroy(oc)
sp.deploy(oc)
index := "multi_splunk-indexes_71031"
errIndex := sp.createIndexes(oc, index)
o.Expect(errIndex).NotTo(o.HaveOccurred())
clfSecret := toSplunkSecret{
name: "splunk-secret-71031",
namespace: splunkProject,
hecToken: sp.hecToken,
}
clf := clusterlogforwarder{
name: "clf-71031",
namespace: splunkProject,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "splunk.yaml"),
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
exutil.By("create clusterlogforwarder")
defer clfSecret.delete(oc)
clfSecret.create(oc)
defer clf.delete(oc)
clf.create(oc, "URL=http://"+sp.serviceURL+":8088", "SECRET_NAME="+clfSecret.name, "INDEX={.openshift.labels.test||\"\"}")
patch := `[{"op": "add", "path": "/spec/filters", "value": [{"name": "labels", "type": "openshiftLabels", "openshiftLabels": {"test": "` + index + `"}}]}, {"op": "add", "path": "/spec/pipelines/0/filterRefs", "value": ["labels"]}]`
clf.update(oc, "", patch, "--type=json")
clf.waitForCollectorPodsReady(oc)
//sleep 10 seconds for collector pods to send logs to splunk
time.Sleep(10 * time.Second)
exutil.By("check logs in splunk")
for _, logType := range []string{"infrastructure", "application", "audit"} {
o.Expect(sp.checkLogs("index=\""+index+"\" log_type=\""+logType+"\"")).To(o.BeTrue(), "can't find "+logType+" logs in "+index+" index")
}
for _, logType := range []string{"application", "infrastructure", "audit"} {
r, e := sp.searchLogs("index=\"main\" log_type=\"" + logType + "\"")
o.Expect(e).NotTo(o.HaveOccurred())
o.Expect(len(r.Results) == 0).Should(o.BeTrue(), "find "+logType+" logs in default index, this is not expected")
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
507c9f19-0b07-40e9-b3d3-4c5f44296fb8
|
Author:qitang-CPaasrunOnly-Medium-71035-Forward logs to Splunk indexes by kubernetes.labels
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_splunk.go
|
g.It("Author:qitang-CPaasrunOnly-Medium-71035-Forward logs to Splunk indexes by kubernetes.labels", func() {
exutil.By("create log producer")
appProj := oc.Namespace()
josnLogTemplate := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", josnLogTemplate, "-p", `LABELS={"test-logging": "logging-OCP_71035", "test.logging.io/logging.qe-test-label": "logging-OCP_71035"}`).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
splunkProject := oc.Namespace()
sp := splunkPodServer{
namespace: splunkProject,
name: "default-http",
authType: "http",
version: "9.0",
}
sp.init()
exutil.By("Deploy splunk")
defer sp.destroy(oc)
sp.deploy(oc)
index := "logging-OCP_71035"
errIndex := sp.createIndexes(oc, index)
o.Expect(errIndex).NotTo(o.HaveOccurred())
clfSecret := toSplunkSecret{
name: "splunk-secret-71035",
namespace: splunkProject,
hecToken: sp.hecToken,
}
clf := clusterlogforwarder{
name: "clf-71035",
namespace: splunkProject,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "splunk.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
exutil.By("create clusterlogforwarder")
defer clfSecret.delete(oc)
clfSecret.create(oc)
defer clf.delete(oc)
clf.create(oc, "URL=http://"+sp.serviceURL+":8088", "SECRET_NAME="+clfSecret.name, "INDEX={.kubernetes.labels.\"test.logging.io/logging.qe-test-label\"||\"\"}")
exutil.By("check logs in splunk")
// logs from project appProj should be stored in 'logging-OCP_71035'; other logs should be in the default index
o.Expect(sp.checkLogs("index=\""+index+"\"")).To(o.BeTrue(), "can't find logs in "+index+" index")
r, e := sp.searchLogs("index=\"" + index + "\", kubernetes.namespace_name!=\"" + appProj + "\"")
o.Expect(e).NotTo(o.HaveOccurred())
o.Expect(len(r.Results) == 0).Should(o.BeTrue(), "find logs from other namespaces in "+index+" index, this is not expected")
r, e = sp.searchLogs("index!=\"" + index + "\", kubernetes.namespace_name=\"" + appProj + "\"")
o.Expect(e).NotTo(o.HaveOccurred())
o.Expect(len(r.Results) == 0).Should(o.BeTrue(), "find logs from project "+appProj+" in other indexes, this is not expected")
for _, logType := range []string{"audit", "infrastructure"} {
o.Expect(sp.checkLogs("index=\"main\", log_type=\""+logType+"\"")).To(o.BeTrue(), "can't find "+logType+" logs in main index")
r, e := sp.searchLogs("index=\"" + index + "\", log_type=\"" + logType + "\"")
o.Expect(e).NotTo(o.HaveOccurred())
o.Expect(len(r.Results) == 0).Should(o.BeTrue(), "find "+logType+" logs in "+index+" index, this is not expected")
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
02dec57c-5373-4a2b-afb7-a9888ef85939
|
Author:anli-CPaasrunOnly-High-75234-logs fallback to default splunk index if template syntax can not be found
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_splunk.go
|
g.It("Author:anli-CPaasrunOnly-High-75234-logs fallback to default splunk index if template syntax can not be found", func() {
exutil.By("create log producer")
appProj := oc.Namespace()
josnLogTemplate := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", josnLogTemplate, "-p", "LABELS={\"test-logging\": \"logging-OCP-71322\"}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
splunkProject := oc.Namespace()
sp := splunkPodServer{
namespace: splunkProject,
name: "default-http",
authType: "http",
version: "9.0",
}
sp.init()
exutil.By("Deploy splunk")
defer sp.destroy(oc)
sp.deploy(oc)
o.Expect(sp.createIndexes(oc, appProj)).NotTo(o.HaveOccurred())
o.Expect(sp.createIndexes(oc, "openshift-operator-lifecycle-manager")).NotTo(o.HaveOccurred())
clfSecret := toSplunkSecret{
name: "splunk-secret-71322",
namespace: splunkProject,
hecToken: sp.hecToken,
}
clf := clusterlogforwarder{
name: "clf-71322",
namespace: splunkProject,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "splunk.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
exutil.By("create clusterlogforwarder")
defer clfSecret.delete(oc)
clfSecret.create(oc)
defer clf.delete(oc)
clf.create(oc, "URL=http://"+sp.serviceURL+":8088", "SECRET_NAME="+clfSecret.name, "INDEX={.kubernetes.namespace_name||\"main\"}")
exutil.By("verify logs can are found in namespace_name index")
allFound := true
for _, logIndex := range []string{appProj, "openshift-operator-lifecycle-manager"} {
if sp.checkLogs("index=" + logIndex) {
e2e.Logf("found logs in index %s", logIndex)
} else {
e2e.Logf("can not find logs in index %s", logIndex)
allFound = false
}
}
o.Expect(allFound).To(o.BeTrue(), "can't find some logs in namespace_name index ")
exutil.By("verify infra and audit logs are send to main index")
allFound = true
for _, logType := range []string{"audit", "infrastructure"} {
if sp.checkLogs(`index="main" log_type="` + logType + `"`) {
e2e.Logf("found logs %s in index main", logType)
} else {
e2e.Logf("Can not find logs %s in index main ", logType)
allFound = false
}
}
o.Expect(allFound).To(o.BeTrue(), "can't find some type of logs in main index")
})
| |||||
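The INDEX template {.kubernetes.namespace_name||"main"} used above resolves to the namespace name when the record carries one and to "main" otherwise, which is why audit and journal records (which have no kubernetes metadata) fall back to the main index. A rough Go sketch of that fallback semantics, for illustration only (this is not the collector's actual implementation):
// resolveIndex mimics the `{.field||"fallback"}` template: use the field when present
// and non-empty, otherwise return the fallback value.
func resolveIndex(record map[string]interface{}, fallback string) string {
if k8s, ok := record["kubernetes"].(map[string]interface{}); ok {
if ns, ok := k8s["namespace_name"].(string); ok && ns != "" {
return ns
}
}
return fallback
}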
test case
|
openshift/openshift-tests-private
|
72d7cafe-f4f4-44c4-8c08-fab602109809
|
Author:anli-CPaasrunOnly-Critical-68303-mCLF Inputs.receiver.http multiple Inputs.receivers to splunk
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_splunk.go
|
g.It("Author:anli-CPaasrunOnly-Critical-68303-mCLF Inputs.receiver.http multiple Inputs.receivers to splunk", func() {
clfNS := oc.Namespace()
splunkProject := clfNS
g.By("Deploy splunk server")
//define splunk deployment
sp := splunkPodServer{
namespace: splunkProject,
name: "splunk-http",
authType: "http",
version: "9.0",
}
sp.init()
defer sp.destroy(oc)
sp.deploy(oc)
g.By("create clusterlogforwarder/instance")
// The secret used in CLF to splunk server
clfSecret := toSplunkSecret{
name: "to-splunk-secret-68303",
namespace: clfNS,
hecToken: sp.hecToken,
caFile: "",
keyFile: "",
certFile: "",
passphrase: "",
}
defer clfSecret.delete(oc)
clfSecret.create(oc)
clf := clusterlogforwarder{
name: "http-to-splunk",
namespace: clfNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "httpserver-to-splunk.yaml"),
secretName: clfSecret.name,
serviceAccountName: "clf-" + getRandomString(),
waitForPodReady: true,
collectAuditLogs: false,
collectApplicationLogs: false,
collectInfrastructureLogs: false,
}
defer clf.delete(oc)
clf.create(oc, "URL=http://"+sp.serviceURL+":8088")
g.By("send data to httpservers")
o.Expect(postDataToHttpserver(oc, clfNS, "https://"+clf.name+"-httpserver1."+clfNS+".svc:8081", `{"test data" : "from httpserver1"}`)).To(o.BeTrue())
o.Expect(postDataToHttpserver(oc, clfNS, "https://"+clf.name+"-httpserver2."+clfNS+".svc:8082", `{"test data" : "from httpserver2"}`)).To(o.BeTrue())
o.Expect(postDataToHttpserver(oc, clfNS, "https://"+clf.name+"-httpserver3."+clfNS+".svc:8083", `{"test data" : "from httpserver3"}`)).To(o.BeTrue())
g.By("check logs in splunk")
o.Expect(sp.auditLogFound()).To(o.BeTrue())
})
| |||||
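The test relies on the postDataToHttpserver helper to POST JSON records to the CLF-managed receivers. As a rough, hedged sketch (the helper's real implementation may differ; the receivers use self-signed certificates here, hence the relaxed TLS verification), such a POST could look like:
import (
"crypto/tls"
"fmt"
"net/http"
"strings"
"time"
)
// postJSON is an illustrative stand-in for posting one JSON record to an HTTP receiver.
func postJSON(url, body string) error {
client := &http.Client{
Timeout: 30 * time.Second,
Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
}
resp, err := client.Post(url, "application/json", strings.NewReader(body))
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode >= 300 {
return fmt.Errorf("unexpected status: %s", resp.Status)
}
return nil
}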
test case
|
openshift/openshift-tests-private
|
5c7eb4f2-bad6-405e-8501-a3c45c9d9a9d
|
Author:anli-CPaasrunOnly-Medium-75386-ClusterLogForwarder input validation testing.
|
['"path/filepath"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_splunk.go
|
g.It("Author:anli-CPaasrunOnly-Medium-75386-ClusterLogForwarder input validation testing.", func() {
splunkProject := oc.Namespace()
sp := splunkPodServer{
namespace: splunkProject,
name: "default-http",
authType: "http",
version: "9.0",
}
sp.init()
exutil.By("Deploy splunk")
defer sp.destroy(oc)
sp.deploy(oc)
clfSecret := toSplunkSecret{
name: "splunk-secret-75386",
namespace: splunkProject,
hecToken: sp.hecToken,
}
clf := clusterlogforwarder{
name: "clf-75386",
namespace: splunkProject,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "splunk.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
exutil.By("create clusterlogforwarder")
defer clfSecret.delete(oc)
clfSecret.create(oc)
defer clf.delete(oc)
clf.create(oc, "URL=http://"+sp.serviceURL+":8088", "SECRET_NAME="+clfSecret.name, "INDEX={.kubernetes.non_existing.key||\"\"}")
exutil.By("update CLF to set invalid glob for namespace")
patch := `[{"op":"add","path":"/spec/inputs","value":[{"name":"new-app","type":"application","application":{"excludes":[{"namespace":"invalid-name@"}],"includes":[{"namespace":"tes*t"}]}}]},{"op":"replace","path":"/spec/pipelines/0/inputRefs","value":["new-app"]}]`
clf.update(oc, "", patch, "--type=json")
checkResource(oc, true, false, "globs must match", []string{"clusterlogforwarder.observability.openshift.io", clf.name, "-n", clf.namespace, "-ojsonpath={.status.inputConditions[0].message}"})
exutil.By("update CLF to set invalid sources for infrastructure logs")
patch = `[{"op":"replace","path":"/spec/inputs","value":[{"name":"selected-infra","type":"infrastructure","infrastructure":{"sources":["nodesd","containersf"]}}]},{"op":"replace","path":"/spec/pipelines/0/inputRefs","value":["selected-infra"]}]`
outString, _ := clf.patch(oc, patch)
o.Expect(strings.Contains(outString, `The ClusterLogForwarder "clf-75386" is invalid`)).To(o.BeTrue())
o.Expect(strings.Contains(outString, `Unsupported value: "nodesd": supported values: "container", "node"`)).To(o.BeTrue())
o.Expect(strings.Contains(outString, `Unsupported value: "containersf": supported values: "container", "node"`)).To(o.BeTrue())
exutil.By("update CLF to set invalid sources for audit logs")
patch = `[{"op":"replace","path":"/spec/pipelines/0/inputRefs","value":["selected-audit"]},{"op":"replace","path":"/spec/inputs","value":[{"name":"selected-audit","type":"audit","audit":{"sources":["nodess","containersf"]}}]}]`
outString, _ = clf.patch(oc, patch)
o.Expect(strings.Contains(outString, `The ClusterLogForwarder "clf-75386" is invalid`)).To(o.BeTrue())
o.Expect(strings.Contains(outString, `Unsupported value: "nodess": supported values: "auditd", "kubeAPI", "openshiftAPI", "ovn"`)).To(o.BeTrue())
o.Expect(strings.Contains(outString, `Unsupported value: "containersf": supported values: "auditd", "kubeAPI", "openshiftAPI", "ovn"`)).To(o.BeTrue())
exutil.By("update CLF to use string as matchExpressions values")
patch = `[{"op":"replace","path":"/spec/inputs/0/application","value":{"selector":{"matchExpressions":[{"key":"test.logging.io/logging.qe-test-label","operator":"Exists","values":"logging-71749-test-1"}]}}}]`
outString, _ = clf.patch(oc, patch)
o.Expect(strings.Contains(outString, `The ClusterLogForwarder "clf-75386" is invalid`)).To(o.BeTrue())
o.Expect(strings.Contains(outString, `spec.inputs[0].application.selector.matchExpressions[0].values: Invalid value: "string"`)).To(o.BeTrue())
o.Expect(strings.Contains(outString, `spec.inputs[0].application.selector.matchExpressions[0].values in body must be of type array: "string"`)).To(o.BeTrue())
})
| |||||
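For contrast with the rejected patch above, a selector whose matchExpressions values field is supplied as a JSON array passes the schema check. The snippet below is illustrative only: the label key is reused from the test, while the operator and values are placeholders.
// values must be a JSON array, not a string, to satisfy the CRD schema
validSelectorPatch := `[{"op":"replace","path":"/spec/inputs/0/application","value":{"selector":{"matchExpressions":[{"key":"test.logging.io/logging.qe-test-label","operator":"In","values":["logging-75386-test"]}]}}}]`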
test case
|
openshift/openshift-tests-private
|
10a3ddfa-9476-4360-8fa1-d2849c5610ad
|
Author:qitang-CPaasrunOnly-Medium-75390-CLF should be rejected and show error message if the filters are invalid
|
['"path/filepath"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/logging/vector_splunk.go
|
g.It("Author:qitang-CPaasrunOnly-Medium-75390-CLF should be rejected and show error message if the filters are invalid", func() {
splunkProject := oc.Namespace()
sp := splunkPodServer{
namespace: splunkProject,
name: "default-http",
authType: "http",
version: "9.0",
}
sp.init()
exutil.By("Deploy splunk")
defer sp.destroy(oc)
sp.deploy(oc)
clfSecret := toSplunkSecret{
name: "splunk-secret-75390",
namespace: splunkProject,
hecToken: sp.hecToken,
}
clf := clusterlogforwarder{
name: "clf-75390",
namespace: splunkProject,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "splunk.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
exutil.By("create clusterlogforwarder")
defer clfSecret.delete(oc)
clfSecret.create(oc)
defer clf.delete(oc)
clf.create(oc, "URL=http://"+sp.serviceURL+":8088", "SECRET_NAME="+clfSecret.name)
exutil.By("verfy clf without drop spec is rejected")
patch := `[{"op":"add","path":"/spec/filters","value":[{"name":"drop-logs","type":"drop"}]},{"op":"add","path":"/spec/pipelines/0/filterRefs","value":["drop-logs"]}]`
outString, _ := clf.patch(oc, patch)
o.Expect(strings.Contains(outString, `The ClusterLogForwarder "clf-75390" is invalid:`)).To(o.BeTrue())
o.Expect(strings.Contains(outString, `invalid: spec.filters[0]: Invalid value: "object": Additional type specific spec is required for the filter type`)).To(o.BeTrue())
exutil.By("verfy clf with invalid drop fileds is rejected")
patch = `[{"op":"add","path":"/spec/filters","value":[{"name":"drop-logs","type":"drop","drop":[{"test":[{"field":".kubernetes.labels.test.logging.io/logging.qe-test-label","matches":".+"}]}]}]},{"op":"add","path":"/spec/pipelines/0/filterRefs","value":["drop-logs"]}]`
outString, _ = clf.patch(oc, patch)
o.Expect(strings.Contains(outString, `The ClusterLogForwarder "clf-75390" is invalid:`)).To(o.BeTrue())
o.Expect(strings.Contains(outString, `spec.filters[0].drop[0].test[0].field: Invalid value: ".kubernetes.labels.test.logging.io/logging.qe-test-label"`)).To(o.BeTrue())
o.Expect(strings.Contains(outString, `spec.filters[0].drop[0].test[0].field in body should match '^(\.[a-zA-Z0-9_]+|\."[^"]+")(\.[a-zA-Z0-9_]+|\."[^"]+")*$`)).To(o.BeTrue())
exutil.By("verify CLF without prune spec is rejected")
patch = `[{"op":"add","path":"/spec/filters", "value": [{"name": "prune-logs", "type": "prune"}]},{"op":"add","path":"/spec/pipelines/0/filterRefs","value":["prune-logs"]}]`
outString, _ = clf.patch(oc, patch)
o.Expect(strings.Contains(outString, `The ClusterLogForwarder "clf-75390" is invalid:`)).To(o.BeTrue())
o.Expect(strings.Contains(outString, ` Invalid value: "object": Additional type specific spec is required for the filter type`)).To(o.BeTrue())
exutil.By("verify CLF with invalid prune value is rejected")
patch = `[{"op":"add","path":"/spec/filters","value":[{"name":"prune-logs","type":"prune","prune":{"in":[".kubernetes.namespace_labels.pod-security.kubernetes.io/audit",".file",".kubernetes.annotations"]}}]},{"op":"add","path":"/spec/pipelines/0/filterRefs","value":["prune-logs"]}]`
outString, _ = clf.patch(oc, patch)
o.Expect(strings.Contains(outString, `The ClusterLogForwarder "clf-75390" is invalid:`)).To(o.BeTrue())
o.Expect(strings.Contains(outString, `Invalid value: ".kubernetes.namespace_labels.pod-security.kubernetes.io/audit"`)).To(o.BeTrue())
o.Expect(strings.Contains(outString, `body should match '^(\.[a-zA-Z0-9_]+|\."[^"]+")(\.[a-zA-Z0-9_]+|\."[^"]+")*$'`)).To(o.BeTrue())
exutil.By("verify filtersStatus show error when prune fields include .log_type, .message or .log_source")
patch = `[{"op":"add","path":"/spec/filters","value":[{"name":"prune-logs","prune":{"in":[".log_type",".message",".log_source"]},"type":"prune"}]},{"op":"add","path":"/spec/pipelines/0/filterRefs","value":["prune-logs"]}]`
clf.update(oc, "", patch, "--type=json")
checkResource(oc, true, false, `prune-logs: [[".log_type" ".message" ".log_source"] is/are required fields and must be removed from the`+" `in` list.]", []string{"clusterlogforwarder.observability.openshift.io", clf.name, "-n", clf.namespace, "-ojsonpath={.status.filterConditions[0].message}"})
patch = `[{"op":"replace","path":"/spec/filters","value":[{"name":"prune-logs","prune":{"notIn":[".kubernetes",".\"@timestamp\"",".openshift",".hostname"]},"type":"prune"}]}]`
clf.update(oc, "", patch, "--type=json")
checkResource(oc, true, false, `prune-logs: [[".log_source" ".log_type" ".message"] is/are required fields and must be included in`+" the `notIn` list.]", []string{"clusterlogforwarder.observability.openshift.io", clf.name, "-n", clf.namespace, "-ojsonpath={.status.filterConditions[0].message}"})
})
| |||||
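The field-path pattern quoted in the errors above allows two segment forms: a bare `.name` segment or a double-quoted `."…"` segment, so label keys containing dots or slashes have to be quoted. Illustrative only, a field value the schema accepts for the drop filter rejected above:
// dots and slashes inside a label key must be wrapped in a quoted segment
field := `.kubernetes.labels."test.logging.io/logging.qe-test-label"`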
file
|
openshift/openshift-tests-private
|
85374c1c-702e-4c6e-81de-fd43dd716f69
|
configmap
|
import (
"fmt"
"encoding/json"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
)
|
github.com/openshift/openshift-tests-private/test/extended/mco/configmap.go
|
package mco
import (
"fmt"
"encoding/json"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
)
// ConfigMap struct encapsulates the functionalities regarding ocp configmaps
type ConfigMap struct {
Resource
}
// ConfigMapList handles list of ConfigMap
type ConfigMapList struct {
ResourceList
}
// NewConfigMap creates a ConfigMap struct
func NewConfigMap(oc *exutil.CLI, namespace, name string) *ConfigMap {
return &ConfigMap{Resource: *NewNamespacedResource(oc, "ConfigMap", namespace, name)}
}
// NewConfigMapList creates a ConfigMapList struct
func NewConfigMapList(oc *exutil.CLI, namespace string) *ConfigMapList {
return &ConfigMapList{ResourceList: *NewNamespacedResourceList(oc, "ConfigMap", namespace)}
}
// HasKey returns if a key is present in "data"
func (cm *ConfigMap) HasKey(key string) (string, bool, error) {
dataMap, err := cm.GetDataMap()
if err != nil {
return "", false, err
}
data, ok := dataMap[key]
if !ok {
return "", false, nil
}
return data, true, nil
}
// GetDataValue returns the value of a key stored in "data".
func (cm *ConfigMap) GetDataValue(key string) (string, error) {
// We can't use the "resource.Get" method, because exutil.client will trim the output, removing spaces and newlines that could be important in a configuration.
dataMap, err := cm.GetDataMap()
if err != nil {
return "", err
}
data, ok := dataMap[key]
if !ok {
return "", fmt.Errorf("Key %s does not exist in the .data in Configmap -n %s %s",
key, cm.GetNamespace(), cm.GetName())
}
return data, nil
}
// GetDataMap returns the values in the .data field as a map[string]string
func (cm *ConfigMap) GetDataMap() (map[string]string, error) {
data := map[string]string{}
dataJSON, err := cm.Get(`{.data}`)
if err != nil {
return nil, err
}
if err := json.Unmarshal([]byte(dataJSON), &data); err != nil {
return nil, err
}
return data, nil
}
// GetDataValueOrFail returns the value of a key stored in "data" and fails the test if the value cannot be retrieved
func (cm *ConfigMap) GetDataValueOrFail(key string) string {
value, err := cm.GetDataValue(key)
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(),
"Could get the value for key %s in configmap -n %s %s",
key, cm.GetNamespace(), cm.GetName())
return value
}
// SetData update the configmap with the given values. Same as "oc set data cm/..."
func (cm *ConfigMap) SetData(arg string, args ...string) error {
params := []string{"data"}
params = append(params, cm.Resource.getCommonParams()...)
params = append(params, arg)
if len(args) > 0 {
params = append(params, args...)
}
return cm.Resource.oc.WithoutNamespace().Run("set").Args(params...).Execute()
}
// RemoveDataKey removes a key from the configmap data values
func (cm *ConfigMap) RemoveDataKey(key string) error {
return cm.Patch("json", `[{"op": "remove", "path": "/data/`+key+`"}]`)
}
// CreateConfigMapWithRandomCert creates a configmap that stores a random CA in it
func CreateConfigMapWithRandomCert(oc *exutil.CLI, cmNamespace, cmName, certKey string) (*ConfigMap, error) {
_, caPath, err := createCA(createTmpDir(), certKey)
if err != nil {
return nil, err
}
err = oc.WithoutNamespace().Run("create").Args("cm", "-n", cmNamespace, cmName, "--from-file", caPath).Execute()
if err != nil {
return nil, err
}
return NewConfigMap(oc, cmNamespace, cmName), nil
}
// GetCloudProviderConfigMap will return the CloudProviderConfigMap or nil if it is not defined in the infrastructure resource
func GetCloudProviderConfigMap(oc *exutil.CLI) *ConfigMap {
infra := NewResource(oc, "infrastructure", "cluster")
cmName := infra.GetOrFail(`{.spec.cloudConfig.name}`)
if cmName == "" {
logger.Infof("CloudProviderConfig ConfigMap is not defined in the infrastructure resource: %s", infra.PrettyString())
return nil
}
return NewConfigMap(oc, "openshift-config", cmName)
}
// GetAll returns a []ConfigMap list with all existing ConfigMaps sorted by creation timestamp
func (cml *ConfigMapList) GetAll() ([]ConfigMap, error) {
cml.ResourceList.SortByTimestamp()
allResources, err := cml.ResourceList.GetAll()
if err != nil {
return nil, err
}
all := make([]ConfigMap, 0, len(allResources))
for _, res := range allResources {
all = append(all, *NewConfigMap(cml.oc, res.namespace, res.name))
}
return all, nil
}
// GetAllOrFail returns a []ConfigMap list with all existing ConfigMaps sorted by creation time; if any error happens it fails the test
func (cml *ConfigMapList) GetAllOrFail() []ConfigMap {
all, err := cml.GetAll()
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(), "Error getting the list of existing ConfigMap in the cluster")
return all
}
|
package mco
| ||||
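A minimal usage sketch of the ConfigMap helpers defined above, assuming it lives in the same mco package; the namespace, configmap name and keys below are hypothetical and not taken from any existing test.
// exampleConfigMapUsage shows a typical read/update/cleanup flow with the helpers above.
func exampleConfigMapUsage(oc *exutil.CLI) {
cm := NewConfigMap(oc.AsAdmin(), "openshift-config", "example-cm") // hypothetical configmap
// read a single key, failing the test if it cannot be retrieved
value := cm.GetDataValueOrFail("config.yaml")
logger.Infof("config.yaml is %d bytes", len(value))
// update and then remove a data key
o.Expect(cm.SetData("extra-key=extra-value")).NotTo(o.HaveOccurred())
o.Expect(cm.RemoveDataKey("extra-key")).NotTo(o.HaveOccurred())
}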
function
|
openshift/openshift-tests-private
|
3d3e94de-acf0-44bc-96c4-c2f74c13cabb
|
NewConfigMap
|
['ConfigMap']
|
github.com/openshift/openshift-tests-private/test/extended/mco/configmap.go
|
func NewConfigMap(oc *exutil.CLI, namespace, name string) *ConfigMap {
return &ConfigMap{Resource: *NewNamespacedResource(oc, "ConfigMap", namespace, name)}
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
d54e49cc-3928-4e10-ae28-e4f3b777e1d9
|
NewConfigMapList
|
['ConfigMap', 'ConfigMapList']
|
github.com/openshift/openshift-tests-private/test/extended/mco/configmap.go
|
func NewConfigMapList(oc *exutil.CLI, namespace string) *ConfigMapList {
return &ConfigMapList{ResourceList: *NewNamespacedResourceList(oc, "ConfigMap", namespace)}
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
148c2971-f75e-4ac9-9864-0a65d6750b0f
|
HasKey
|
['ConfigMap']
|
github.com/openshift/openshift-tests-private/test/extended/mco/configmap.go
|
func (cm *ConfigMap) HasKey(key string) (string, bool, error) {
dataMap, err := cm.GetDataMap()
if err != nil {
return "", false, err
}
data, ok := dataMap[key]
if !ok {
return "", false, nil
}
return data, true, nil
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
7893bd5b-b10d-42b2-aded-1567eb96c45e
|
GetDataValue
|
['"fmt"']
|
['ConfigMap']
|
github.com/openshift/openshift-tests-private/test/extended/mco/configmap.go
|
func (cm *ConfigMap) GetDataValue(key string) (string, error) {
// We can't use the "resource.Get" method, because exutil.client will trim the output, removing spaces and newlines that could be important in a configuration.
dataMap, err := cm.GetDataMap()
if err != nil {
return "", err
}
data, ok := dataMap[key]
if !ok {
return "", fmt.Errorf("Key %s does not exist in the .data in Configmap -n %s %s",
key, cm.GetNamespace(), cm.GetName())
}
return data, nil
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
ae7af71b-5b43-4983-ab95-3dd6228cd095
|
GetDataMap
|
['"encoding/json"']
|
['ConfigMap']
|
github.com/openshift/openshift-tests-private/test/extended/mco/configmap.go
|
func (cm *ConfigMap) GetDataMap() (map[string]string, error) {
data := map[string]string{}
dataJSON, err := cm.Get(`{.data}`)
if err != nil {
return nil, err
}
if err := json.Unmarshal([]byte(dataJSON), &data); err != nil {
return nil, err
}
return data, nil
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
5f1c50ae-035e-4597-9d13-0139cd53169d
|
GetDataValueOrFail
|
['ConfigMap']
|
github.com/openshift/openshift-tests-private/test/extended/mco/configmap.go
|
func (cm *ConfigMap) GetDataValueOrFail(key string) string {
value, err := cm.GetDataValue(key)
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(),
"Could get the value for key %s in configmap -n %s %s",
key, cm.GetNamespace(), cm.GetName())
return value
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
7d995f41-cb6f-44cc-ad12-1829595fb566
|
SetData
|
['ConfigMap']
|
github.com/openshift/openshift-tests-private/test/extended/mco/configmap.go
|
func (cm *ConfigMap) SetData(arg string, args ...string) error {
params := []string{"data"}
params = append(params, cm.Resource.getCommonParams()...)
params = append(params, arg)
if len(args) > 0 {
params = append(params, args...)
}
return cm.Resource.oc.WithoutNamespace().Run("set").Args(params...).Execute()
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
abf51f2f-05c6-4fc7-8c42-09f52820bc54
|
RemoveDataKey
|
['"encoding/json"']
|
['ConfigMap']
|
github.com/openshift/openshift-tests-private/test/extended/mco/configmap.go
|
func (cm *ConfigMap) RemoveDataKey(key string) error {
return cm.Patch("json", `[{"op": "remove", "path": "/data/`+key+`"}]`)
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
26a0e8d0-1cfa-4a5c-b815-8e3cd3ae56f9
|
CreateConfigMapWithRandomCert
|
['ConfigMap']
|
github.com/openshift/openshift-tests-private/test/extended/mco/configmap.go
|
func CreateConfigMapWithRandomCert(oc *exutil.CLI, cmNamespace, cmName, certKey string) (*ConfigMap, error) {
_, caPath, err := createCA(createTmpDir(), certKey)
if err != nil {
return nil, err
}
err = oc.WithoutNamespace().Run("create").Args("cm", "-n", cmNamespace, cmName, "--from-file", caPath).Execute()
if err != nil {
return nil, err
}
return NewConfigMap(oc, cmNamespace, cmName), nil
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
58d3e69e-5a23-438d-9715-89f2267eff1c
|
GetCloudProviderConfigMap
|
['ConfigMap']
|
github.com/openshift/openshift-tests-private/test/extended/mco/configmap.go
|
func GetCloudProviderConfigMap(oc *exutil.CLI) *ConfigMap {
infra := NewResource(oc, "infrastructure", "cluster")
cmName := infra.GetOrFail(`{.spec.cloudConfig.name}`)
if cmName == "" {
logger.Infof("CloudProviderConfig ConfigMap is not defined in the infrastructure resource: %s", infra.PrettyString())
return nil
}
return NewConfigMap(oc, "openshift-config", cmName)
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
7ca9ce48-c096-4785-8d26-bba6eb8ea7f6
|
GetAll
|
['ConfigMap', 'ConfigMapList']
|
github.com/openshift/openshift-tests-private/test/extended/mco/configmap.go
|
func (cml *ConfigMapList) GetAll() ([]ConfigMap, error) {
cml.ResourceList.SortByTimestamp()
allResources, err := cml.ResourceList.GetAll()
if err != nil {
return nil, err
}
all := make([]ConfigMap, 0, len(allResources))
for _, res := range allResources {
all = append(all, *NewConfigMap(cml.oc, res.namespace, res.name))
}
return all, nil
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
f26c89b9-1bf0-44ad-9fef-82fbe0ef29a5
|
GetAllOrFail
|
['ConfigMap', 'ConfigMapList']
|
github.com/openshift/openshift-tests-private/test/extended/mco/configmap.go
|
func (cml *ConfigMapList) GetAllOrFail() []ConfigMap {
all, err := cml.GetAll()
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(), "Error getting the list of existing ConfigMap in the cluster")
return all
}
|
mco
| ||||
file
|
openshift/openshift-tests-private
|
eb4d8c71-3c57-4f66-aa92-4bedf3d10d28
|
containerruntimeconfig
|
import (
"fmt"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
)
|
github.com/openshift/openshift-tests-private/test/extended/mco/containerruntimeconfig.go
|
package mco
import (
"fmt"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
)
// ContainerRuntimeConfig struct is used to handle ContainerRuntimeConfig resources in OCP
type ContainerRuntimeConfig struct {
Resource
template string
}
// ContainerRuntimeConfigList handles a list of ContainerRuntimeConfig resources
type ContainerRuntimeConfigList struct {
ResourceList
}
// NewContainerRuntimeConfig creates a ContainerRuntimeConfig struct
func NewContainerRuntimeConfig(oc *exutil.CLI, name, template string) *ContainerRuntimeConfig {
return &ContainerRuntimeConfig{Resource: *NewResource(oc, "ContainerRuntimeConfig", name), template: template}
}
// NewContainerRuntimeConfigList creates a ContainerRuntimeConfigList struct
func NewContainerRuntimeConfigList(oc *exutil.CLI) *ContainerRuntimeConfigList {
return &ContainerRuntimeConfigList{*NewResourceList(oc, "ContainerRuntimeConfig")}
}
// TODO: Refactor this struct: remove this method and embed Template
func (cr *ContainerRuntimeConfig) create(parameters ...string) {
allParams := []string{"--ignore-unknown-parameters=true", "-f", cr.template,
"-p", "NAME=" + cr.name}
allParams = append(allParams, parameters...)
exutil.CreateClusterResourceFromTemplate(cr.oc, allParams...)
}
func (cr ContainerRuntimeConfig) waitUntilSuccess(timeout string) {
logger.Infof("wait for %s to report success", cr.name)
o.Eventually(func() map[string]interface{} {
successCond := JSON(cr.GetConditionByType("Success"))
if successCond.Exists() {
return successCond.ToMap()
}
logger.Infof("success condition not found, conditions are %s", cr.GetOrFail(`{.status.conditions}`))
return nil
},
timeout, "2s").Should(o.SatisfyAll(o.HaveKeyWithValue("status", "True"),
o.HaveKeyWithValue("message", "Success")))
}
func (cr ContainerRuntimeConfig) waitUntilFailure(expectedMsg, timeout string) {
logger.Infof("wait for %s to report failure", cr.name)
o.Eventually(func() map[string]interface{} {
failureCond := JSON(cr.GetConditionByType("Failure"))
if failureCond.Exists() {
return failureCond.ToMap()
}
logger.Infof("Failure condition not found, conditions are %s", cr.GetOrFail(`{.status.conditions}`))
return nil
},
timeout, "2s").Should(o.SatisfyAll(o.HaveKeyWithValue("status", "False"), o.HaveKeyWithValue("message", o.ContainSubstring(expectedMsg))))
}
// GetGeneratedMCName returns the name of the MC that was generated by this ContainerRuntimeConfig resource
func (cr ContainerRuntimeConfig) GetGeneratedMCName() (string, error) {
mcName, err := cr.Get(`{.metadata.finalizers[0]}`)
if err != nil {
return "", err
}
if mcName == "" {
return "", fmt.Errorf("It was not possible to get the finalizer from %s %s: %s", cr.GetKind(), cr.GetName(), cr.PrettyString())
}
return mcName, nil
}
// GetGeneratedMCNameOrFail returns the name of the MC that was generated by this ContainerRuntimeConfig resource and fails the test case if it cannot be done
func (cr ContainerRuntimeConfig) GetGeneratedMCNameOrFail() string {
mcName, err := cr.GetGeneratedMCName()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the generated MC for %s %s", cr.GetKind(), cr.GetName())
return mcName
}
|
package mco
| ||||
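A minimal usage sketch of the ContainerRuntimeConfig helpers defined above, assuming it lives in the same mco package; the resource name, template parameter and timeout are hypothetical.
// exampleContainerRuntimeConfigUsage shows the typical create-and-wait flow.
func exampleContainerRuntimeConfigUsage(oc *exutil.CLI, template string) {
cr := NewContainerRuntimeConfig(oc.AsAdmin(), "example-crc", template)
cr.create("LOG_LEVEL=debug") // render the template and create the resource
cr.waitUntilSuccess("5m") // wait until the Success condition is reported
mcName := cr.GetGeneratedMCNameOrFail() // MachineConfig generated for this resource
logger.Infof("generated MachineConfig: %s", mcName)
}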
function
|
openshift/openshift-tests-private
|
97f55929-8994-4ba5-b646-eec24d1e4e30
|
NewContainerRuntimeConfig
|
['ContainerRuntimeConfig']
|
github.com/openshift/openshift-tests-private/test/extended/mco/containerruntimeconfig.go
|
func NewContainerRuntimeConfig(oc *exutil.CLI, name, template string) *ContainerRuntimeConfig {
return &ContainerRuntimeConfig{Resource: *NewResource(oc, "ContainerRuntimeConfig", name), template: template}
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
f7496b6b-bf60-40e2-a3f7-2e8f08f88992
|
NewContainerRuntimeConfigList
|
['ContainerRuntimeConfig', 'ContainerRuntimeConfigList']
|
github.com/openshift/openshift-tests-private/test/extended/mco/containerruntimeconfig.go
|
func NewContainerRuntimeConfigList(oc *exutil.CLI) *ContainerRuntimeConfigList {
return &ContainerRuntimeConfigList{*NewResourceList(oc, "ContainerRuntimeConfig")}
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
985d3f84-bb50-4b6f-a0e5-6b91e9a795bc
|
create
|
['ContainerRuntimeConfig']
|
github.com/openshift/openshift-tests-private/test/extended/mco/containerruntimeconfig.go
|
func (cr *ContainerRuntimeConfig) create(parameters ...string) {
allParams := []string{"--ignore-unknown-parameters=true", "-f", cr.template,
"-p", "NAME=" + cr.name}
allParams = append(allParams, parameters...)
exutil.CreateClusterResourceFromTemplate(cr.oc, allParams...)
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
be5c98ff-3abb-4763-b280-9e14bca25b11
|
waitUntilSuccess
|
['ContainerRuntimeConfig']
|
github.com/openshift/openshift-tests-private/test/extended/mco/containerruntimeconfig.go
|
func (cr ContainerRuntimeConfig) waitUntilSuccess(timeout string) {
logger.Infof("wait for %s to report success", cr.name)
o.Eventually(func() map[string]interface{} {
successCond := JSON(cr.GetConditionByType("Success"))
if successCond.Exists() {
return successCond.ToMap()
}
logger.Infof("success condition not found, conditions are %s", cr.GetOrFail(`{.status.conditions}`))
return nil
},
timeout, "2s").Should(o.SatisfyAll(o.HaveKeyWithValue("status", "True"),
o.HaveKeyWithValue("message", "Success")))
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
f9605ca6-627d-4c4b-b751-0180bd3e7e1d
|
waitUntilFailure
|
['ContainerRuntimeConfig']
|
github.com/openshift/openshift-tests-private/test/extended/mco/containerruntimeconfig.go
|
func (cr ContainerRuntimeConfig) waitUntilFailure(expectedMsg, timeout string) {
logger.Infof("wait for %s to report failure", cr.name)
o.Eventually(func() map[string]interface{} {
failureCond := JSON(cr.GetConditionByType("Failure"))
if failureCond.Exists() {
return failureCond.ToMap()
}
logger.Infof("Failure condition not found, conditions are %s", cr.GetOrFail(`{.status.conditions}`))
return nil
},
timeout, "2s").Should(o.SatisfyAll(o.HaveKeyWithValue("status", "False"), o.HaveKeyWithValue("message", o.ContainSubstring(expectedMsg))))
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
007328e4-ca9d-4cba-8ead-a8affddf2934
|
GetGeneratedMCName
|
['"fmt"']
|
['ContainerRuntimeConfig']
|
github.com/openshift/openshift-tests-private/test/extended/mco/containerruntimeconfig.go
|
func (cr ContainerRuntimeConfig) GetGeneratedMCName() (string, error) {
mcName, err := cr.Get(`{.metadata.finalizers[0]}`)
if err != nil {
return "", err
}
if mcName == "" {
return "", fmt.Errorf("It was not possible to get the finalizer from %s %s: %s", cr.GetKind(), cr.GetName(), cr.PrettyString())
}
return mcName, nil
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
df8b80a6-cc36-4ca4-846f-d3dea3f55869
|
GetGeneratedMCNameOrFail
|
['ContainerRuntimeConfig']
|
github.com/openshift/openshift-tests-private/test/extended/mco/containerruntimeconfig.go
|
func (cr ContainerRuntimeConfig) GetGeneratedMCNameOrFail() string {
mcName, err := cr.GetGeneratedMCName()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the generated MC for %s %s", cr.GetKind(), cr.GetName())
return mcName
}
|
mco
| ||||
file
|
openshift/openshift-tests-private
|
15f991e3-22c7-422d-a8bf-fb768c2efe71
|
containers
|
import (
"encoding/base32"
"fmt"
"hash/fnv"
"os"
"path/filepath"
"strings"
"time"
"github.com/openshift/openshift-tests-private/test/extended/util/architecture"
"github.com/google/uuid"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
container "github.com/openshift/openshift-tests-private/test/extended/util/container"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/mco/containers.go
|
package mco
import (
"encoding/base32"
"fmt"
"hash/fnv"
"os"
"path/filepath"
"strings"
"time"
"github.com/openshift/openshift-tests-private/test/extended/util/architecture"
"github.com/google/uuid"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
container "github.com/openshift/openshift-tests-private/test/extended/util/container"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
// OsImageBuilder encapsulates the functionality to build custom osImage in the machine running the testcase
type OsImageBuilder struct {
oc *exutil.CLI
architecture architecture.Architecture
baseImage,
osImage,
dockerFileCommands, // Full Dockerfile except the "FROM baseOsImage..." line, which will be calculated
dockerConfig,
tmpDir string
cleanupRegistryRoute,
UseInternalRegistry bool
}
func (b *OsImageBuilder) prepareEnvironment() error {
var err error
if b.dockerConfig == "" {
logger.Infof("No docker config file was provided to the osImage builder. Generating a new docker config file")
exutil.By("Extract pull-secret")
pullSecret := GetPullSecret(b.oc.AsAdmin())
tokenDir, err := pullSecret.Extract()
if err != nil {
return fmt.Errorf("Error extracting pull-secret. Error: %s", err)
}
logger.Infof("Pull secret has been extracted to: %s\n", tokenDir)
b.dockerConfig = filepath.Join(tokenDir, ".dockerconfigjson")
}
logger.Infof("Using docker config file: %s\n", b.dockerConfig)
b.architecture = architecture.ClusterArchitecture(b.oc)
logger.Infof("Building using architecture: %s", b.architecture)
b.baseImage, err = getImageFromReleaseInfo(b.oc.AsAdmin(), LayeringBaseImageReleaseInfo, b.dockerConfig)
if err != nil {
return fmt.Errorf("Error getting the base image to build new osImages. Error: %s", err)
}
if b.UseInternalRegistry {
if err := b.preparePushToInternalRegistry(); err != nil {
return err
}
} else if b.osImage == "" {
uniqueTag, err := generateUniqueTag(b.oc.AsAdmin(), b.baseImage)
if err != nil {
return err
}
b.osImage = getLayeringTestImageRepository(uniqueTag)
}
logger.Infof("Building image: %s", b.osImage)
if b.tmpDir == "" {
b.tmpDir = e2e.TestContext.OutputDir
}
return nil
}
func (b *OsImageBuilder) preparePushToInternalRegistry() error {
exposed, expErr := b.oc.Run("get").Args("configs.imageregistry.operator.openshift.io", "cluster", `-ojsonpath={.spec.defaultRoute}`).Output()
if expErr != nil {
return fmt.Errorf("Error getting internal registry configuration. Error: %s", expErr)
}
if !IsTrue(exposed) {
b.cleanupRegistryRoute = true
logger.Infof("The internal registry service is not exposed. Exposing internal registry service...")
expErr := b.oc.Run("patch").Args("configs.imageregistry.operator.openshift.io/cluster", "--patch", `{"spec":{"defaultRoute":true}}`, "--type=merge").Execute()
if expErr != nil {
return fmt.Errorf("Error exposing internal registry. Error: %s", expErr)
}
}
logger.Infof("Create namespace to store the service account to access the internal registry")
nsExistsErr := b.oc.Run("get").Args("namespace", layeringTestsTmpNamespace).Execute()
if nsExistsErr != nil {
err := b.oc.Run("create").Args("namespace", layeringTestsTmpNamespace).Execute()
if err != nil {
return fmt.Errorf("Error creating namespace %s to store the layering imagestreams. Error: %s",
layeringTestsTmpNamespace, err)
}
} else {
logger.Infof("Namespace %s already exists. Skip namespace creation", layeringTestsTmpNamespace)
}
logger.Infof("Create service account with registry admin permissions to store the imagestream")
saExistsErr := b.oc.Run("get").Args("-n", layeringTestsTmpNamespace, "serviceaccount", layeringRegistryAdminSAName).Execute()
if saExistsErr != nil {
cErr := b.oc.Run("create").Args("-n", layeringTestsTmpNamespace, "serviceaccount", layeringRegistryAdminSAName).Execute()
if cErr != nil {
return fmt.Errorf("Error creating ServiceAccount %s/%s: %s", layeringTestsTmpNamespace, layeringRegistryAdminSAName, cErr)
}
} else {
logger.Infof("SA %s/%s already exists. Skip SA creation", layeringTestsTmpNamespace, layeringRegistryAdminSAName)
}
admErr := b.oc.Run("adm").Args("-n", layeringTestsTmpNamespace, "policy", "add-cluster-role-to-user", "registry-admin", "-z", layeringRegistryAdminSAName).Execute()
if admErr != nil {
return fmt.Errorf("Error creating ServiceAccount %s: %s", layeringRegistryAdminSAName, admErr)
}
logger.Infof("Get SA token")
saToken, err := b.oc.Run("create").Args("-n", layeringTestsTmpNamespace, "token", layeringRegistryAdminSAName).Output()
if err != nil {
logger.Errorf("Error getting token for SA %s", layeringRegistryAdminSAName)
return err
}
logger.Debugf("SA TOKEN: %s", saToken)
logger.Infof("OK!\n")
logger.Infof("Get current internal registry route")
internalRegistryURL, routeErr := b.oc.Run("get").Args("route", "default-route", "-n", "openshift-image-registry", "--template", `{{ .spec.host }}`).Output()
if routeErr != nil {
return fmt.Errorf("Error getting internal registry's route. Ourput: %s\nError: %s", internalRegistryURL, routeErr)
}
logger.Infof("Current internal registry route: %s", internalRegistryURL)
uniqueTag, err := generateUniqueTag(b.oc.AsAdmin(), b.baseImage)
if err != nil {
return err
}
b.osImage = fmt.Sprintf("%s/%s/%s:%s", internalRegistryURL, MachineConfigNamespace, "layering", uniqueTag)
logger.Infof("Using image: %s", b.osImage)
logger.Infof("Loging as registry admin to internal registry")
podmanCLI := container.NewPodmanCLI()
loginOut, loginErr := podmanCLI.Run("login").Args(internalRegistryURL, "-u", layeringRegistryAdminSAName, "-p", saToken, "--tls-verify=false", "--authfile", b.dockerConfig).Output()
if loginErr != nil {
return fmt.Errorf("Error trying to login to internal registry:\nOutput:%s\nError:%s", loginOut, loginErr)
}
logger.Infof("OK!\n")
return nil
}
// CleanUp will clean up all the helper resources created by the builder
func (b *OsImageBuilder) CleanUp() error {
logger.Infof("Cleanup image builder resources")
if b.UseInternalRegistry {
logger.Infof("Removing namespace %s", layeringTestsTmpNamespace)
err := b.oc.Run("delete").Args("namespace", layeringTestsTmpNamespace, "--ignore-not-found").Execute()
if err != nil {
return fmt.Errorf("Error deleting namespace %s. Error: %s",
layeringTestsTmpNamespace, err)
}
if b.cleanupRegistryRoute {
logger.Infof("The internal registry route was exposed. Remove the exposed internal registry route to restore initial state.")
expErr := b.oc.Run("patch").Args("configs.imageregistry.operator.openshift.io/cluster", "--patch", `{"spec":{"defaultRoute":false}}`, "--type=merge").Execute()
if expErr != nil {
return fmt.Errorf("Error exposing internal registry. Error: %s", expErr)
}
}
} else {
logger.Infof("Not using internal registry, nothing to clean")
}
return nil
}
func (b *OsImageBuilder) buildImage() error {
exutil.By("Build image locally")
logger.Infof("Base image: %s\n", b.baseImage)
dockerFile := "FROM " + b.baseImage + "\n" + b.dockerFileCommands + "\n" + ExpirationDockerfileLabel
logger.Infof(" Using Dockerfile:\n%s", dockerFile)
buildDir, err := prepareDockerfileDirectory(b.tmpDir, dockerFile)
if err != nil {
return fmt.Errorf("Error creating the build directory with the Dockerfile. Error: %s", err)
}
podmanCLI := container.NewPodmanCLI()
podmanCLI.ExecCommandPath = buildDir
switch b.architecture {
case architecture.AMD64, architecture.ARM64, architecture.PPC64LE, architecture.S390X:
output, err := podmanCLI.Run("build").Args(buildDir, "--arch", b.architecture.String(), "--tag", b.osImage, "--authfile", b.dockerConfig).Output()
if err != nil {
msg := fmt.Sprintf("Podman failed building image %s with architecture %s:\n%s\n%s", b.osImage, b.architecture, output, err)
logger.Errorf(msg)
return fmt.Errorf(msg)
}
logger.Debugf(output)
default:
msg := fmt.Sprintf("architecture '%s' is not supported. ", b.architecture)
logger.Errorf(msg)
return fmt.Errorf(msg)
}
logger.Infof("OK!\n")
return nil
}
func (b *OsImageBuilder) pushImage() error {
if b.osImage == "" {
return fmt.Errorf("There is no image to be pushed. Wast the osImage built?")
}
exutil.By("Push osImage")
logger.Infof("Pushing image %s", b.osImage)
podmanCLI := container.NewPodmanCLI()
output, err := podmanCLI.Run("push").Args(b.osImage, "--tls-verify=false", "--authfile", b.dockerConfig).Output()
if err != nil {
msg := fmt.Sprintf("Podman failed pushing image %s:\n%s\n%s", b.osImage, output, err)
logger.Errorf(msg)
return fmt.Errorf(msg)
}
logger.Debugf(output)
logger.Infof("OK!\n")
return nil
}
func (b *OsImageBuilder) removeImage() error {
if b.osImage == "" {
return fmt.Errorf("There is no image to be removed. Wast the osImage built?")
}
logger.Infof("Removing image %s", b.osImage)
podmanCLI := container.NewPodmanCLI()
rmOutput, err := podmanCLI.Run("rmi").Args("-i", b.osImage).Output()
if err != nil {
msg := fmt.Sprintf("Podman failed removing image %s:\n%s\n%s", b.osImage, rmOutput, err)
logger.Errorf(msg)
return fmt.Errorf(msg)
}
logger.Debugf(rmOutput)
logger.Infof("OK!\n")
return nil
}
func (b *OsImageBuilder) digestImage() (string, error) {
if b.osImage == "" {
return "", fmt.Errorf("There is no image to be digested. Wast the osImage built?")
}
skopeoCLI := NewSkopeoCLI().SetAuthFile(b.dockerConfig)
inspectInfo, err := skopeoCLI.Run("inspect").Args("--tls-verify=false", "docker://"+b.osImage).Output()
if err != nil {
msg := fmt.Sprintf("Skopeo failed inspecting image %s:\n%s\n%s", b.osImage, inspectInfo, err)
logger.Errorf(msg)
return "", fmt.Errorf(msg)
}
logger.Debugf(inspectInfo)
inspectJSON := JSON(inspectInfo)
digestedImage := inspectJSON.Get("Name").ToString() + "@" + inspectJSON.Get("Digest").ToString()
logger.Infof("Image %s was digested as %s", b.osImage, digestedImage)
return digestedImage, nil
}
// CreateAndDigestOsImage create the osImage and returns the image digested
func (b *OsImageBuilder) CreateAndDigestOsImage() (string, error) {
if err := b.prepareEnvironment(); err != nil {
return "", err
}
if err := b.buildImage(); err != nil {
return "", err
}
if err := b.pushImage(); err != nil {
return "", err
}
if err := b.removeImage(); err != nil {
return "", err
}
return b.digestImage()
}
func prepareDockerfileDirectory(baseDir, dockerFileContent string) (string, error) {
layout := "2006_01_02T15-04-05Z"
directory := filepath.Join(baseDir, fmt.Sprintf("containerbuild-%s", time.Now().Format(layout)))
if err := os.Mkdir(directory, os.ModePerm); err != nil {
return "", err
}
dockerFile := filepath.Join(directory, "Dockerfile")
if err := os.WriteFile(dockerFile, []byte(dockerFileContent), 0o644); err != nil {
return "", err
}
return directory, nil
}
func getImageFromReleaseInfo(oc *exutil.CLI, imageName, dockerConfigFile string) (string, error) {
stdout, stderr, err := oc.Run("adm").Args("release", "info", "--insecure", "--image-for", imageName,
"--registry-config", dockerConfigFile).Outputs()
if err != nil {
logger.Errorf("STDOUT: %s", stdout)
logger.Errorf("STDERR: %s", stderr)
return stdout + stderr, err
}
return stdout, nil
}
func getLayeringTestImageRepository(defaultTag string) string {
layeringImageRepo, exists := os.LookupEnv(EnvVarLayeringTestImageRepository)
if !exists {
layeringImageRepo = DefaultLayeringQuayRepository
}
// If no tag is provided for the image, we add one
if !strings.Contains(layeringImageRepo, ":") && defaultTag != "" {
layeringImageRepo = layeringImageRepo + ":" + defaultTag
}
return layeringImageRepo
}
func generateUniqueTag(oc *exutil.CLI, baseImage string) (string, error) {
var encoding = base32.NewEncoding("abcdefghijklmnopqrstuvwxyz234567")
baseImageSlice := strings.Split(baseImage, "@")
if len(baseImageSlice) != 2 {
return "", fmt.Errorf("The name of the base image %s is not properly formatted as a diggested image", baseImage)
}
rhelCoreosDigest := baseImageSlice[1]
clusterName, err := exutil.GetInfraID(oc)
if err != nil {
return "", nil
}
testCaseID := GetCurrentTestPolarionIDNumber()
s := fmt.Sprintf("%s%s%s", rhelCoreosDigest, clusterName, uuid.NewString())
uniqueTag := fmt.Sprintf("%s-%s", testCaseID,
strings.TrimRight(encoding.EncodeToString(fnv.New64().Sum([]byte(s))), "=")[:(127-len(testCaseID))])
logger.Infof("Using unique tag %s", uniqueTag)
return uniqueTag, nil
}
|
package mco
| ||||
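A minimal usage sketch of the OsImageBuilder defined above, assuming it lives in the same mco package; the Dockerfile command is hypothetical.
// exampleOsImageBuild builds, pushes and digests a custom osImage via the builder above.
func exampleOsImageBuild(oc *exutil.CLI) (string, error) {
builder := &OsImageBuilder{
oc: oc.AsAdmin(),
dockerFileCommands: `RUN echo "custom layer" > /etc/example-layer.txt`,
UseInternalRegistry: true, // push the image through the cluster's internal registry
}
defer func() {
// remove the helper namespace and restore the registry route configuration
if err := builder.CleanUp(); err != nil {
logger.Errorf("cleanup failed: %s", err)
}
}()
// prepareEnvironment, buildImage, pushImage, removeImage and digestImage run in order
return builder.CreateAndDigestOsImage()
}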
function
|
openshift/openshift-tests-private
|
86f7d08a-2db5-410f-b752-2d4796813f62
|
prepareEnvironment
|
['"fmt"', '"path/filepath"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"']
|
['OsImageBuilder']
|
github.com/openshift/openshift-tests-private/test/extended/mco/containers.go
|
func (b *OsImageBuilder) prepareEnvironment() error {
var err error
if b.dockerConfig == "" {
logger.Infof("No docker config file was provided to the osImage builder. Generating a new docker config file")
exutil.By("Extract pull-secret")
pullSecret := GetPullSecret(b.oc.AsAdmin())
tokenDir, err := pullSecret.Extract()
if err != nil {
return fmt.Errorf("Error extracting pull-secret. Error: %s", err)
}
logger.Infof("Pull secret has been extracted to: %s\n", tokenDir)
b.dockerConfig = filepath.Join(tokenDir, ".dockerconfigjson")
}
logger.Infof("Using docker config file: %s\n", b.dockerConfig)
b.architecture = architecture.ClusterArchitecture(b.oc)
logger.Infof("Building using architecture: %s", b.architecture)
b.baseImage, err = getImageFromReleaseInfo(b.oc.AsAdmin(), LayeringBaseImageReleaseInfo, b.dockerConfig)
if err != nil {
return fmt.Errorf("Error getting the base image to build new osImages. Error: %s", err)
}
if b.UseInternalRegistry {
if err := b.preparePushToInternalRegistry(); err != nil {
return err
}
} else if b.osImage == "" {
uniqueTag, err := generateUniqueTag(b.oc.AsAdmin(), b.baseImage)
if err != nil {
return err
}
b.osImage = getLayeringTestImageRepository(uniqueTag)
}
logger.Infof("Building image: %s", b.osImage)
if b.tmpDir == "" {
b.tmpDir = e2e.TestContext.OutputDir
}
return nil
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
298fdc27-d04c-48f3-b208-9fff60a83656
|
preparePushToInternalRegistry
|
['"fmt"', 'container "github.com/openshift/openshift-tests-private/test/extended/util/container"']
|
['OsImageBuilder']
|
github.com/openshift/openshift-tests-private/test/extended/mco/containers.go
|
func (b *OsImageBuilder) preparePushToInternalRegistry() error {
exposed, expErr := b.oc.Run("get").Args("configs.imageregistry.operator.openshift.io", "cluster", `-ojsonpath={.spec.defaultRoute}`).Output()
if expErr != nil {
return fmt.Errorf("Error getting internal registry configuration. Error: %s", expErr)
}
if !IsTrue(exposed) {
b.cleanupRegistryRoute = true
logger.Infof("The internal registry service is not exposed. Exposing internal registry service...")
expErr := b.oc.Run("patch").Args("configs.imageregistry.operator.openshift.io/cluster", "--patch", `{"spec":{"defaultRoute":true}}`, "--type=merge").Execute()
if expErr != nil {
return fmt.Errorf("Error exposing internal registry. Error: %s", expErr)
}
}
logger.Infof("Create namespace to store the service account to access the internal registry")
nsExistsErr := b.oc.Run("get").Args("namespace", layeringTestsTmpNamespace).Execute()
if nsExistsErr != nil {
err := b.oc.Run("create").Args("namespace", layeringTestsTmpNamespace).Execute()
if err != nil {
return fmt.Errorf("Error creating namespace %s to store the layering imagestreams. Error: %s",
layeringTestsTmpNamespace, err)
}
} else {
logger.Infof("Namespace %s already exists. Skip namespace creation", layeringTestsTmpNamespace)
}
logger.Infof("Create service account with registry admin permissions to store the imagestream")
saExistsErr := b.oc.Run("get").Args("-n", layeringTestsTmpNamespace, "serviceaccount", layeringRegistryAdminSAName).Execute()
if saExistsErr != nil {
cErr := b.oc.Run("create").Args("-n", layeringTestsTmpNamespace, "serviceaccount", layeringRegistryAdminSAName).Execute()
if cErr != nil {
return fmt.Errorf("Error creating ServiceAccount %s/%s: %s", layeringTestsTmpNamespace, layeringRegistryAdminSAName, cErr)
}
} else {
logger.Infof("SA %s/%s already exists. Skip SA creation", layeringTestsTmpNamespace, layeringRegistryAdminSAName)
}
admErr := b.oc.Run("adm").Args("-n", layeringTestsTmpNamespace, "policy", "add-cluster-role-to-user", "registry-admin", "-z", layeringRegistryAdminSAName).Execute()
if admErr != nil {
return fmt.Errorf("Error creating ServiceAccount %s: %s", layeringRegistryAdminSAName, admErr)
}
logger.Infof("Get SA token")
saToken, err := b.oc.Run("create").Args("-n", layeringTestsTmpNamespace, "token", layeringRegistryAdminSAName).Output()
if err != nil {
logger.Errorf("Error getting token for SA %s", layeringRegistryAdminSAName)
return err
}
logger.Debugf("SA TOKEN: %s", saToken)
logger.Infof("OK!\n")
logger.Infof("Get current internal registry route")
internalRegistryURL, routeErr := b.oc.Run("get").Args("route", "default-route", "-n", "openshift-image-registry", "--template", `{{ .spec.host }}`).Output()
if routeErr != nil {
return fmt.Errorf("Error getting internal registry's route. Ourput: %s\nError: %s", internalRegistryURL, routeErr)
}
logger.Infof("Current internal registry route: %s", internalRegistryURL)
uniqueTag, err := generateUniqueTag(b.oc.AsAdmin(), b.baseImage)
if err != nil {
return err
}
b.osImage = fmt.Sprintf("%s/%s/%s:%s", internalRegistryURL, MachineConfigNamespace, "layering", uniqueTag)
logger.Infof("Using image: %s", b.osImage)
logger.Infof("Loging as registry admin to internal registry")
podmanCLI := container.NewPodmanCLI()
loginOut, loginErr := podmanCLI.Run("login").Args(internalRegistryURL, "-u", layeringRegistryAdminSAName, "-p", saToken, "--tls-verify=false", "--authfile", b.dockerConfig).Output()
if loginErr != nil {
return fmt.Errorf("Error trying to login to internal registry:\nOutput:%s\nError:%s", loginOut, loginErr)
}
logger.Infof("OK!\n")
return nil
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
ff643878-b82b-4728-b594-0ffc0670727f
|
CleanUp
|
['"fmt"']
|
['OsImageBuilder']
|
github.com/openshift/openshift-tests-private/test/extended/mco/containers.go
|
func (b *OsImageBuilder) CleanUp() error {
logger.Infof("Cleanup image builder resources")
if b.UseInternalRegistry {
logger.Infof("Removing namespace %s", layeringTestsTmpNamespace)
err := b.oc.Run("delete").Args("namespace", layeringTestsTmpNamespace, "--ignore-not-found").Execute()
if err != nil {
return fmt.Errorf("Error deleting namespace %s. Error: %s",
layeringTestsTmpNamespace, err)
}
if b.cleanupRegistryRoute {
logger.Infof("The internal registry route was exposed. Remove the exposed internal registry route to restore initial state.")
expErr := b.oc.Run("patch").Args("configs.imageregistry.operator.openshift.io/cluster", "--patch", `{"spec":{"defaultRoute":false}}`, "--type=merge").Execute()
if expErr != nil {
return fmt.Errorf("Error exposing internal registry. Error: %s", expErr)
}
}
} else {
logger.Infof("Not using internal registry, nothing to clean")
}
return nil
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
db44b468-5f45-4aaa-84e8-0dffff759390
|
buildImage
|
['"fmt"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"', 'container "github.com/openshift/openshift-tests-private/test/extended/util/container"']
|
['OsImageBuilder']
|
github.com/openshift/openshift-tests-private/test/extended/mco/containers.go
|
func (b *OsImageBuilder) buildImage() error {
exutil.By("Build image locally")
logger.Infof("Base image: %s\n", b.baseImage)
dockerFile := "FROM " + b.baseImage + "\n" + b.dockerFileCommands + "\n" + ExpirationDockerfileLabel
logger.Infof(" Using Dockerfile:\n%s", dockerFile)
buildDir, err := prepareDockerfileDirectory(b.tmpDir, dockerFile)
if err != nil {
return fmt.Errorf("Error creating the build directory with the Dockerfile. Error: %s", err)
}
podmanCLI := container.NewPodmanCLI()
podmanCLI.ExecCommandPath = buildDir
switch b.architecture {
case architecture.AMD64, architecture.ARM64, architecture.PPC64LE, architecture.S390X:
output, err := podmanCLI.Run("build").Args(buildDir, "--arch", b.architecture.String(), "--tag", b.osImage, "--authfile", b.dockerConfig).Output()
if err != nil {
msg := fmt.Sprintf("Podman failed building image %s with architecture %s:\n%s\n%s", b.osImage, b.architecture, output, err)
logger.Errorf(msg)
return fmt.Errorf(msg)
}
logger.Debugf(output)
default:
msg := fmt.Sprintf("architecture '%s' is not supported. ", b.architecture)
logger.Errorf(msg)
return fmt.Errorf(msg)
}
logger.Infof("OK!\n")
return nil
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
3c5d200f-1f68-4a6c-8205-1c5622f2343e
|
pushImage
|
['"fmt"', 'container "github.com/openshift/openshift-tests-private/test/extended/util/container"']
|
['OsImageBuilder']
|
github.com/openshift/openshift-tests-private/test/extended/mco/containers.go
|
func (b *OsImageBuilder) pushImage() error {
if b.osImage == "" {
return fmt.Errorf("There is no image to be pushed. Wast the osImage built?")
}
exutil.By("Push osImage")
logger.Infof("Pushing image %s", b.osImage)
podmanCLI := container.NewPodmanCLI()
output, err := podmanCLI.Run("push").Args(b.osImage, "--tls-verify=false", "--authfile", b.dockerConfig).Output()
if err != nil {
msg := fmt.Sprintf("Podman failed pushing image %s:\n%s\n%s", b.osImage, output, err)
logger.Errorf(msg)
return fmt.Errorf(msg)
}
logger.Debugf(output)
logger.Infof("OK!\n")
return nil
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
704dc022-9fbf-4595-9378-f7489a692f78
|
removeImage
|
['"fmt"', 'container "github.com/openshift/openshift-tests-private/test/extended/util/container"']
|
['OsImageBuilder']
|
github.com/openshift/openshift-tests-private/test/extended/mco/containers.go
|
func (b *OsImageBuilder) removeImage() error {
if b.osImage == "" {
return fmt.Errorf("There is no image to be removed. Wast the osImage built?")
}
logger.Infof("Removing image %s", b.osImage)
podmanCLI := container.NewPodmanCLI()
rmOutput, err := podmanCLI.Run("rmi").Args("-i", b.osImage).Output()
if err != nil {
msg := fmt.Sprintf("Podman failed removing image %s:\n%s\n%s", b.osImage, rmOutput, err)
logger.Errorf(msg)
return fmt.Errorf(msg)
}
logger.Debugf(rmOutput)
logger.Infof("OK!\n")
return nil
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
7e2a6fab-4c3f-4618-bfa3-ad4c9844d366
|
digestImage
|
['"fmt"']
|
['OsImageBuilder']
|
github.com/openshift/openshift-tests-private/test/extended/mco/containers.go
|
func (b *OsImageBuilder) digestImage() (string, error) {
if b.osImage == "" {
return "", fmt.Errorf("There is no image to be digested. Wast the osImage built?")
}
skopeoCLI := NewSkopeoCLI().SetAuthFile(b.dockerConfig)
inspectInfo, err := skopeoCLI.Run("inspect").Args("--tls-verify=false", "docker://"+b.osImage).Output()
if err != nil {
msg := fmt.Sprintf("Skopeo failed inspecting image %s:\n%s\n%s", b.osImage, inspectInfo, err)
logger.Errorf(msg)
return "", fmt.Errorf(msg)
}
logger.Debugf(inspectInfo)
inspectJSON := JSON(inspectInfo)
digestedImage := inspectJSON.Get("Name").ToString() + "@" + inspectJSON.Get("Digest").ToString()
logger.Infof("Image %s was digested as %s", b.osImage, digestedImage)
return digestedImage, nil
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
a0ee00ce-dd47-4c0b-908a-e75a049c6e85
|
CreateAndDigestOsImage
|
['OsImageBuilder']
|
github.com/openshift/openshift-tests-private/test/extended/mco/containers.go
|
func (b *OsImageBuilder) CreateAndDigestOsImage() (string, error) {
if err := b.prepareEnvironment(); err != nil {
return "", err
}
if err := b.buildImage(); err != nil {
return "", err
}
if err := b.pushImage(); err != nil {
return "", err
}
if err := b.removeImage(); err != nil {
return "", err
}
return b.digestImage()
}
|
mco
| ||||
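For orientation, here is a minimal, hedged sketch of how the builder pipeline above could be driven from a test in this package. It assumes an initialized *exutil.CLI named oc, and the struct literal only sets fields that the methods shown here actually reference; the real OsImageBuilder may define more fields, so treat this as illustrative rather than the canonical call sequence.

// Hypothetical snippet (package mco): build, push and digest a layered osImage.
builder := &OsImageBuilder{
	oc:                  oc, // assumed, pre-configured CLI
	dockerFileCommands:  "RUN echo 'layering test' > /etc/layering-test.txt",
	UseInternalRegistry: true, // push to the exposed internal registry instead of an external repo
}
defer builder.CleanUp()
digestedImage, err := builder.CreateAndDigestOsImage()
if err != nil {
	logger.Errorf("Could not build and push the layered osImage: %s", err)
} else {
	logger.Infof("Layered image ready to be referenced from a MachineConfig: %s", digestedImage)
}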
function
|
openshift/openshift-tests-private
|
ea444449-4e14-42de-8c2d-941355dad8cf
|
prepareDockerfileDirectory
|
['"fmt"', '"os"', '"path/filepath"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/containers.go
|
func prepareDockerfileDirectory(baseDir, dockerFileContent string) (string, error) {
layout := "2006_01_02T15-04-05Z"
directory := filepath.Join(baseDir, fmt.Sprintf("containerbuild-%s", time.Now().Format(layout)))
if err := os.Mkdir(directory, os.ModePerm); err != nil {
return "", err
}
dockerFile := filepath.Join(directory, "Dockerfile")
if err := os.WriteFile(dockerFile, []byte(dockerFileContent), 0o644); err != nil {
return "", err
}
return directory, nil
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
e9006c72-eb66-4b84-bc2a-13339c5ce40b
|
getImageFromReleaseInfo
|
github.com/openshift/openshift-tests-private/test/extended/mco/containers.go
|
func getImageFromReleaseInfo(oc *exutil.CLI, imageName, dockerConfigFile string) (string, error) {
stdout, stderr, err := oc.Run("adm").Args("release", "info", "--insecure", "--image-for", imageName,
"--registry-config", dockerConfigFile).Outputs()
if err != nil {
logger.Errorf("STDOUT: %s", stdout)
logger.Errorf("STDERR: %s", stderr)
return stdout + stderr, err
}
return stdout, nil
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
62e6136e-8eab-4521-b7b1-056d6618ab85
|
getLayeringTestImageRepository
|
['"os"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/containers.go
|
func getLayeringTestImageRepository(defaultTag string) string {
layeringImageRepo, exists := os.LookupEnv(EnvVarLayeringTestImageRepository)
if !exists {
layeringImageRepo = DefaultLayeringQuayRepository
}
// If no tag is provided for the image, we add one
if !strings.Contains(layeringImageRepo, ":") && defaultTag != "" {
layeringImageRepo = layeringImageRepo + ":" + defaultTag
}
return layeringImageRepo
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
57710c07-5c45-4215-804e-4f1768993381
|
generateUniqueTag
|
['"encoding/base32"', '"fmt"', '"hash/fnv"', '"strings"', '"github.com/google/uuid"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/containers.go
|
func generateUniqueTag(oc *exutil.CLI, baseImage string) (string, error) {
var encoding = base32.NewEncoding("abcdefghijklmnopqrstuvwxyz234567")
baseImageSlice := strings.Split(baseImage, "@")
if len(baseImageSlice) != 2 {
return "", fmt.Errorf("The name of the base image %s is not properly formatted as a diggested image", baseImage)
}
rhelCoreosDigest := baseImageSlice[1]
clusterName, err := exutil.GetInfraID(oc)
if err != nil {
return "", nil
}
testCaseID := GetCurrentTestPolarionIDNumber()
s := fmt.Sprintf("%s%s%s", rhelCoreosDigest, clusterName, uuid.NewString())
uniqueTag := fmt.Sprintf("%s-%s", testCaseID,
strings.TrimRight(encoding.EncodeToString(fnv.New64().Sum([]byte(s))), "=")[:(127-len(testCaseID))])
logger.Infof("Using unique tag %s", uniqueTag)
return uniqueTag, nil
}
|
mco
| ||||
file
|
openshift/openshift-tests-private
|
0c554afd-76f7-4f1e-a7e9-5f57a7d9848e
|
controllerconfig
|
import (
"encoding/json"
"fmt"
b64 "encoding/base64"
"github.com/tidwall/gjson"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
)
|
github.com/openshift/openshift-tests-private/test/extended/mco/controllerconfig.go
|
package mco
import (
"encoding/json"
"fmt"
b64 "encoding/base64"
"github.com/tidwall/gjson"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
)
// ControllerConfig struct is used to handle ControllerConfig resources in OCP
type ControllerConfig struct {
Resource
}
// CertificateInfo stores the information regarding a given certificate
type CertificateInfo struct {
// subject is the cert subject
Subject string `json:"subject"`
// signer is the cert Issuer
Signer string `json:"signer"`
// Date fields have been temporarily removed by devs: https://github.com/openshift/machine-config-operator/pull/3866
// notBefore is the lower boundary for validity
NotBefore string `json:"notBefore"`
// notAfter is the upper boundary for validity
NotAfter string `json:"notAfter"`
// bundleFile is the larger bundle a cert comes from
BundleFile string `json:"bundleFile"`
}
// NewControllerConfig creates a ControllerConfig struct
func NewControllerConfig(oc *exutil.CLI, name string) *ControllerConfig {
return &ControllerConfig{Resource: *NewResource(oc, "ControllerConfig", name)}
}
// GetKubeAPIServerServingCAData returns the base64 decoded value of the kubeAPIServerServingCAData bundle stored in the ControllerConfig
func (cc *ControllerConfig) GetKubeAPIServerServingCAData() (string, error) {
b64KubeAPIServerServingData, err := cc.Get(`{.spec.kubeAPIServerServingCAData}`)
if err != nil {
return "", err
}
kubeAPIServerServingCAData, err := b64.StdEncoding.DecodeString(b64KubeAPIServerServingData)
if err != nil {
return "", err
}
return string(kubeAPIServerServingCAData), err
}
// GetRootCAData returns the base64 decoded value of the rootCA bundle stored in the ControllerConfig
func (cc *ControllerConfig) GetRootCAData() (string, error) {
b64RootCAData, err := cc.Get(`{.spec.rootCAData}`)
if err != nil {
return "", err
}
rootCAData, err := b64.StdEncoding.DecodeString(b64RootCAData)
if err != nil {
return "", err
}
return string(rootCAData), err
}
// GetImageRegistryBundleData returns a map[string]string containing the filenames and values of the image registry bundle data
func (cc *ControllerConfig) GetImageRegistryBundleData() (map[string]string, error) {
return cc.GetImageRegistryBundle("imageRegistryBundleData")
}
// GetImageRegistryBundleUserData returns a map[string]string containing the filenames and values of the image registry bundle user data
func (cc *ControllerConfig) GetImageRegistryBundleUserData() (map[string]string, error) {
return cc.GetImageRegistryBundle("imageRegistryBundleUserData")
}
// GetImageRegistryBundle returns a map[string]string containing the filenames and values of the image registry certificates in a Bundle field
func (cc *ControllerConfig) GetImageRegistryBundle(bundleField string) (map[string]string, error) {
certs := map[string]string{}
bundleData, err := cc.Get(`{.spec.` + bundleField + `}`)
if err != nil {
return nil, err
}
parsedBundleData := gjson.Parse(bundleData)
var b64Err error
parsedBundleData.ForEach(func(_, item gjson.Result) bool {
file := item.Get("file").String()
data64 := item.Get("data").String()
var data []byte
data, b64Err = b64.StdEncoding.DecodeString(data64)
if b64Err != nil {
logger.Infof("Error decoding data for image registry bundle file %s: %s", file, b64Err)
return false // stop iterating
}
certs[file] = string(data)
return true // keep iterating
})
if b64Err != nil {
return nil, b64Err
}
return certs, nil
}
// GetImageRegistryBundleDataByFileName returns the image registry bundle data entry matching the given bundle filename
func (cc *ControllerConfig) GetImageRegistryBundleDataByFileName(fileName string) (string, error) {
certs, err := cc.GetImageRegistryBundleData()
if err != nil {
return "", err
}
data, ok := certs[fileName]
if !ok {
return "", fmt.Errorf("There is no image registry bundle with file name %s", fileName)
}
return data, nil
}
// GetImageRegistryBundleUserDataByFileName returns the image registry user bundle data entry matching the given bundle filename
func (cc *ControllerConfig) GetImageRegistryBundleUserDataByFileName(fileName string) (string, error) {
certs, err := cc.GetImageRegistryBundleUserData()
if err != nil {
return "", err
}
data, ok := certs[fileName]
if !ok {
return "", fmt.Errorf("There is no image registry bundle with file name %s", fileName)
}
return data, nil
}
// GetCertificatesInfo returns a list of CertificateInfo structs with the information of all the certificates tracked by ControllerConfig
func (cc *ControllerConfig) GetCertificatesInfo() ([]CertificateInfo, error) {
certsInfoString := cc.GetOrFail(`{.status.controllerCertificates}`)
logger.Debugf("CERTIFICATES: %s", certsInfoString)
var certsInfo []CertificateInfo
jsonerr := json.Unmarshal([]byte(certsInfoString), &certsInfo)
if jsonerr != nil {
return nil, jsonerr
}
return certsInfo, nil
}
func (cc *ControllerConfig) GetCertificatesInfoByBundleFileName(bundleFile string) ([]CertificateInfo, error) {
var certsInfo []CertificateInfo
allCertsInfo, err := cc.GetCertificatesInfo()
if err != nil {
return nil, err
}
for _, ciLoop := range allCertsInfo {
ci := ciLoop
if ci.BundleFile == bundleFile {
certsInfo = append(certsInfo, ci)
}
}
return certsInfo, nil
}
|
package mco
| ||||
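A short, hedged example of how the ControllerConfig accessors above could be combined in a test. Both the resource name ("machine-config-controller" is the usual singleton name) and the bundle filename are assumptions used only for illustration.

// Hypothetical snippet (package mco): inspect certificates tracked by the ControllerConfig.
cc := NewControllerConfig(oc.AsAdmin(), "machine-config-controller") // resource name is an assumption
rootCA, err := cc.GetRootCAData()
if err != nil {
	logger.Errorf("Could not read the rootCA bundle: %s", err)
} else {
	logger.Infof("rootCA bundle size: %d bytes", len(rootCA))
}
certsInfo, err := cc.GetCertificatesInfoByBundleFileName("/etc/kubernetes/kubelet-ca.crt") // filename is illustrative
if err == nil {
	for _, ci := range certsInfo {
		logger.Infof("Tracked certificate: subject=%q signer=%q bundle=%q", ci.Subject, ci.Signer, ci.BundleFile)
	}
}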
function
|
openshift/openshift-tests-private
|
39e7328e-408b-4445-a3c3-cb4b8cc348e4
|
NewControllerConfig
|
['ControllerConfig']
|
github.com/openshift/openshift-tests-private/test/extended/mco/controllerconfig.go
|
func NewControllerConfig(oc *exutil.CLI, name string) *ControllerConfig {
return &ControllerConfig{Resource: *NewResource(oc, "ControllerConfig", name)}
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
98185979-a1fc-48a6-a82d-0657417850df
|
GetKubeAPIServerServingCAData
|
['ControllerConfig']
|
github.com/openshift/openshift-tests-private/test/extended/mco/controllerconfig.go
|
func (cc *ControllerConfig) GetKubeAPIServerServingCAData() (string, error) {
b64KubeAPIServerServingData, err := cc.Get(`{.spec.kubeAPIServerServingCAData}`)
if err != nil {
return "", err
}
kubeAPIServerServingCAData, err := b64.StdEncoding.DecodeString(b64KubeAPIServerServingData)
if err != nil {
return "", err
}
return string(kubeAPIServerServingCAData), err
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
6ebef262-e849-4af9-b65e-7c3175ea7911
|
GetRootCAData
|
['ControllerConfig']
|
github.com/openshift/openshift-tests-private/test/extended/mco/controllerconfig.go
|
func (cc *ControllerConfig) GetRootCAData() (string, error) {
b64RootCAData, err := cc.Get(`{.spec.rootCAData}`)
if err != nil {
return "", err
}
rootCAData, err := b64.StdEncoding.DecodeString(b64RootCAData)
if err != nil {
return "", err
}
return string(rootCAData), err
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
12313770-1e1f-42ba-95d4-f71e3faaa6a7
|
GetImageRegistryBundleData
|
['ControllerConfig']
|
github.com/openshift/openshift-tests-private/test/extended/mco/controllerconfig.go
|
func (cc *ControllerConfig) GetImageRegistryBundleData() (map[string]string, error) {
return cc.GetImageRegistryBundle("imageRegistryBundleData")
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
92268cb8-2762-4072-986f-1c9e9c63357d
|
GetImageRegistryBundleUserData
|
['ControllerConfig']
|
github.com/openshift/openshift-tests-private/test/extended/mco/controllerconfig.go
|
func (cc *ControllerConfig) GetImageRegistryBundleUserData() (map[string]string, error) {
return cc.GetImageRegistryBundle("imageRegistryBundleUserData")
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
5c20426d-6385-4472-8f14-519032bf9a4d
|
GetImageRegistryBundle
|
['"github.com/tidwall/gjson"']
|
['ControllerConfig']
|
github.com/openshift/openshift-tests-private/test/extended/mco/controllerconfig.go
|
func (cc *ControllerConfig) GetImageRegistryBundle(bundleField string) (map[string]string, error) {
certs := map[string]string{}
bundleData, err := cc.Get(`{.spec.` + bundleField + `}`)
if err != nil {
return nil, err
}
parsedBundleData := gjson.Parse(bundleData)
var b64Err error
parsedBundleData.ForEach(func(_, item gjson.Result) bool {
file := item.Get("file").String()
data64 := item.Get("data").String()
var data []byte
data, b64Err = b64.StdEncoding.DecodeString(data64)
if b64Err != nil {
logger.Infof("Error decoding data for image registry bundle file %s: %s", file, b64Err)
return false // stop iterating
}
certs[file] = string(data)
return true // keep iterating
})
if b64Err != nil {
return nil, b64Err
}
return certs, nil
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
e476c33d-f21d-47ca-932e-e6b9948001bf
|
GetImageRegistryBundleDataByFileName
|
['"fmt"']
|
['ControllerConfig']
|
github.com/openshift/openshift-tests-private/test/extended/mco/controllerconfig.go
|
func (cc *ControllerConfig) GetImageRegistryBundleDataByFileName(fileName string) (string, error) {
certs, err := cc.GetImageRegistryBundleData()
if err != nil {
return "", err
}
data, ok := certs[fileName]
if !ok {
return "", fmt.Errorf("There is no image registry bundle with file name %s", fileName)
}
return data, nil
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
1a58b9a6-403f-4e41-a1fd-d38178b14a2b
|
GetImageRegistryBundleUserDataByFileName
|
['"fmt"']
|
['ControllerConfig']
|
github.com/openshift/openshift-tests-private/test/extended/mco/controllerconfig.go
|
func (cc *ControllerConfig) GetImageRegistryBundleUserDataByFileName(fileName string) (string, error) {
certs, err := cc.GetImageRegistryBundleUserData()
if err != nil {
return "", err
}
data, ok := certs[fileName]
if !ok {
return "", fmt.Errorf("There is no image registry bundle with file name %s", fileName)
}
return data, nil
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
66d6f7b0-7015-4f91-a563-5e1698191f67
|
GetCertificatesInfo
|
['"encoding/json"']
|
['ControllerConfig', 'CertificateInfo']
|
github.com/openshift/openshift-tests-private/test/extended/mco/controllerconfig.go
|
func (cc *ControllerConfig) GetCertificatesInfo() ([]CertificateInfo, error) {
certsInfoString := cc.GetOrFail(`{.status.controllerCertificates}`)
logger.Debugf("CERTIFICATES: %s", certsInfoString)
var certsInfo []CertificateInfo
jsonerr := json.Unmarshal([]byte(certsInfoString), &certsInfo)
if jsonerr != nil {
return nil, jsonerr
}
return certsInfo, nil
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
31e8e70f-cae6-4b13-a6fd-61f9d5c9063f
|
GetCertificatesInfoByBundleFileName
|
['ControllerConfig', 'CertificateInfo']
|
github.com/openshift/openshift-tests-private/test/extended/mco/controllerconfig.go
|
func (cc *ControllerConfig) GetCertificatesInfoByBundleFileName(bundleFile string) ([]CertificateInfo, error) {
var certsInfo []CertificateInfo
allCertsInfo, err := cc.GetCertificatesInfo()
if err != nil {
return nil, err
}
for _, ciLoop := range allCertsInfo {
ci := ciLoop
if ci.BundleFile == bundleFile {
certsInfo = append(certsInfo, ci)
}
}
return certsInfo, nil
}
|
mco
| ||||
file
|
openshift/openshift-tests-private
|
29d86740-af76-410f-9bf8-d6ffb90e928f
|
events
|
import (
"fmt"
"sort"
"time"
"github.com/onsi/gomega/types"
g "github.com/onsi/ginkgo/v2"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
)
|
github.com/openshift/openshift-tests-private/test/extended/mco/events.go
|
package mco
import (
"fmt"
"sort"
"time"
"github.com/onsi/gomega/types"
g "github.com/onsi/ginkgo/v2"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
)
// Event struct is used to handle Event resources in OCP
type Event struct {
Resource
}
// EventList handles a list of events
type EventList struct {
ResourceList
}
// NewEvent creates an Event struct
func NewEvent(oc *exutil.CLI, namespace, name string) *Event {
return &Event{Resource: *NewNamespacedResource(oc, "Event", namespace, name)}
}
// String implements the Stringer interface
func (e Event) String() string {
e.oc.NotShowInfo()
defer e.oc.SetShowInfo()
description, err := e.Get(`{.metadata.creationTimestamp} {.lastTimestamp} Type: {.type} Reason: {.reason} Namespace: {.metadata.namespace} Involves: {.involvedObject.kind}/{.involvedObject.name}`)
if err != nil {
logger.Errorf("Event %s/%s does not exist anymore", e.GetNamespace(), e.GetName())
return ""
}
return description
}
// GetLastTimestamp returns the last occurrence of this event
func (e Event) GetLastTimestamp() (time.Time, error) {
lastOccurrence, err := e.Get(`{.lastTimestamp}`)
if err != nil {
logger.Errorf("Error parsing event %s/%s. Error: %s", e.GetNamespace(), e.GetName(), err)
return time.Time{}, err
}
parsedLastOccurrence, perr := time.Parse(time.RFC3339, lastOccurrence)
if perr != nil {
logger.Errorf("Error parsing event '%s' -n '%s' lastTimestamp: %s", e.GetName(), e.GetNamespace(), perr)
return time.Time{}, perr
}
return parsedLastOccurrence, nil
}
// NewEventList constructs a new event list struct to handle all existing events in a namespace
func NewEventList(oc *exutil.CLI, namespace string) *EventList {
return &EventList{*NewNamespacedResourceList(oc, "Event", namespace)}
}
// GetAll returns a []Event list with all existing events sorted by last occurrence
// the first element will be the most recent one
func (el *EventList) GetAll() ([]Event, error) {
el.SortByLastTimestamp()
allEventResources, err := el.ResourceList.GetAll()
if err != nil {
return nil, err
}
allEvents := make([]Event, 0, len(allEventResources))
for _, eventRes := range allEventResources {
allEvents = append(allEvents, *NewEvent(el.oc, eventRes.namespace, eventRes.name))
}
// We want the first element to be the most recent one
allEvents = reverseEventsList(allEvents)
return allEvents, nil
}
// SortByLastTimestamp configures the list to be sorted by lastTimestamp field
func (el *EventList) SortByLastTimestamp() {
el.ResourceList.SortBy("lastTimestamp")
}
// GetLatest returns the latest event that occurred. Nil if no event exists.
func (el *EventList) GetLatest() (*Event, error) {
allEvents, lerr := el.GetAll()
if lerr != nil {
logger.Errorf("Error getting events %s", lerr)
return nil, lerr
}
if len(allEvents) == 0 {
return nil, nil
}
return &(allEvents[0]), nil
}
// GetAllEventsSinceEvent returns all events that occurred since a given event (not included)
func (el *EventList) GetAllEventsSinceEvent(sinceEvent *Event) ([]Event, error) {
allEvents, lerr := el.GetAll()
if lerr != nil {
logger.Errorf("Error getting events %s", lerr)
return nil, lerr
}
if sinceEvent == nil {
return allEvents, nil
}
returnEvents := []Event{}
for _, event := range allEvents {
if event.name == sinceEvent.name {
break
}
returnEvents = append(returnEvents, event)
}
return returnEvents, nil
}
// GetAllSince returns a list of the events that happened since the provided time
func (el EventList) GetAllSince(since time.Time) ([]Event, error) {
// Remove log noise
el.oc.NotShowInfo()
defer el.oc.SetShowInfo()
allEvents, lerr := el.GetAll()
if lerr != nil {
logger.Errorf("Error getting events %s", lerr)
return nil, lerr
}
returnEvents := []Event{}
for _, loopEvent := range allEvents {
event := loopEvent // this is to make sure that we execute defer in all events, and not only in the last one
// Remove log noise
event.oc.NotShowInfo()
defer event.oc.SetShowInfo()
lastOccurrence, err := event.GetLastTimestamp()
if err != nil {
logger.Errorf("Error getting lastTimestamp in event %s/%s. Error: %s", event.GetNamespace(), event.GetName(), err)
continue
}
if lastOccurrence.Before(since) {
break
}
returnEvents = append(returnEvents, event)
}
return returnEvents, nil
}
// from https://github.com/golang/go/wiki/SliceTricks#reversing
func reverseEventsList(a []Event) []Event {
for i := len(a)/2 - 1; i >= 0; i-- {
opp := len(a) - 1 - i
a[i], a[opp] = a[opp], a[i]
}
return a
}
// HaveEventsSequence returns a gomega matcher that checks if a list of Events contains a given sequence of reasons
func HaveEventsSequence(sequence ...string) types.GomegaMatcher {
return &haveEventsSequenceMatcher{sequence: sequence}
}
// struct to cache and sort events information
type tmpEvent struct {
lastTimestamp time.Time
reason string
}
func (t tmpEvent) String() string { return fmt.Sprintf("%s - %s", t.lastTimestamp, t.reason) }
// sorter to sort the cached event list
type byLastTimestamp []tmpEvent
func (a byLastTimestamp) Len() int { return len(a) }
func (a byLastTimestamp) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byLastTimestamp) Less(i, j int) bool {
return a[i].lastTimestamp.Before(a[j].lastTimestamp)
}
// struct implementing the gomega matcher interface
type haveEventsSequenceMatcher struct {
sequence []string
}
func (matcher *haveEventsSequenceMatcher) Match(actual interface{}) (success bool, err error) {
logger.Infof("Start verifying events sequence: %s", matcher.sequence)
events, ok := actual.([]Event)
if !ok {
return false, fmt.Errorf("HaveSequence matcher expects a slice of Events in test case %v", g.CurrentSpecReport().FullText())
}
// To avoid too many "oc" executions we store the events information in a cached struct list with "lastTimestamp" and "reason" fields.
tmpEvents := []tmpEvent{}
for _, loopEvent := range events {
event := loopEvent // this is to make sure that we execute defer in all events, and not only in the last one
event.oc.NotShowInfo()
defer event.oc.SetShowInfo()
reason, err := event.Get(`{.reason}`)
if err != nil {
return false, err
}
lastTimestamp, err := event.GetLastTimestamp()
if err != nil {
return false, err
}
tmpEvents = append(tmpEvents, tmpEvent{lastTimestamp: lastTimestamp, reason: reason})
}
// We sort the cached list. Oldest event first
sort.Sort(byLastTimestamp(tmpEvents))
// Several events can be created in the same second, hence, we need to take into account
// that 2 events in the same second can match any order.
// If 2 events have the same timestamp
// we consider that the order is right no matter what.
lastEventTime := time.Time{}
for _, seqReason := range matcher.sequence {
found := false
for _, event := range tmpEvents {
if seqReason == event.reason &&
(lastEventTime.Before(event.lastTimestamp) || lastEventTime.Equal(event.lastTimestamp)) {
logger.Infof("Found! %s event in time %s", seqReason, event.lastTimestamp)
lastEventTime = event.lastTimestamp
found = true
break
}
}
// Could not find an event with the sequence's reason. We fail the match
if !found {
logger.Infof("%s event NOT Found after time %s", seqReason, lastEventTime)
return false, nil
}
}
return true, nil
}
func (matcher *haveEventsSequenceMatcher) FailureMessage(actual interface{}) (message string) {
// The type was already validated in Match, we can safely ignore the error
events, _ := actual.([]Event)
output := "Expecte events\n"
if len(events) == 0 {
output = "No events in the list\n"
} else {
for _, event := range events {
output += fmt.Sprintf("- %s\n", event)
}
}
output += fmt.Sprintf("to contain this reason sequence\n\t%s\n", matcher.sequence)
return output
}
func (matcher *haveEventsSequenceMatcher) NegatedFailureMessage(actual interface{}) (message string) {
// The type was already validated in Match, we can safely ignore the error
events, _ := actual.([]Event)
output := "Expecte events\n"
for _, event := range events {
output += fmt.Sprintf("- %s\n", event)
}
output += output + fmt.Sprintf("NOT to contain this reason sequence\n\t%s\n", matcher.sequence)
return output
}
|
package mco
| ||||
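A hedged sketch of how EventList and the HaveEventsSequence matcher are meant to work together, assuming the usual imports (time, and gomega aliased as o) and that the test captures a timestamp before triggering the action; the "Drain"/"Reboot" reason strings are illustrative, not a guaranteed sequence.

// Hypothetical snippet (package mco): assert that events were emitted in order after a config change.
startTime := time.Now()
// ... apply a MachineConfig here and wait for the node to start updating ...
events, err := NewEventList(oc.AsAdmin(), "default").GetAllSince(startTime)
o.Expect(err).NotTo(o.HaveOccurred(), "Error listing events emitted since %s", startTime)
o.Expect(events).To(HaveEventsSequence("Drain", "Reboot")) // reason strings are illustrative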
function
|
openshift/openshift-tests-private
|
6e2d7495-67b4-4a2a-a2ea-5e3a9c0f9189
|
NewEvent
|
['Event']
|
github.com/openshift/openshift-tests-private/test/extended/mco/events.go
|
func NewEvent(oc *exutil.CLI, namespace, name string) *Event {
return &Event{Resource: *NewNamespacedResource(oc, "Event", namespace, name)}
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
47889e36-48d0-4a72-9c63-d73b3621d7a8
|
String
|
['Event']
|
github.com/openshift/openshift-tests-private/test/extended/mco/events.go
|
func (e Event) String() string {
e.oc.NotShowInfo()
defer e.oc.SetShowInfo()
description, err := e.Get(`{.metadata.creationTimestamp} {.lastTimestamp} Type: {.type} Reason: {.reason} Namespace: {.metadata.namespace} Involves: {.involvedObject.kind}/{.involvedObject.name}`)
if err != nil {
logger.Errorf("Event %s/%s does not exist anymore", e.GetNamespace(), e.GetName())
return ""
}
return description
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
3855bd48-a051-4350-b9d4-69d3bd3a0c87
|
GetLastTimestamp
|
['"time"']
|
['Event']
|
github.com/openshift/openshift-tests-private/test/extended/mco/events.go
|
func (e Event) GetLastTimestamp() (time.Time, error) {
lastOccurrence, err := e.Get(`{.lastTimestamp}`)
if err != nil {
logger.Errorf("Error parsing event %s/%s. Error: %s", e.GetNamespace(), e.GetName(), err)
return time.Time{}, err
}
parsedLastOccurrence, perr := time.Parse(time.RFC3339, lastOccurrence)
if perr != nil {
logger.Errorf("Error parsing event '%s' -n '%s' lastTimestamp: %s", e.GetName(), e.GetNamespace(), perr)
return time.Time{}, perr
}
return parsedLastOccurrence, nil
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
9695fe56-e837-4f2f-89bb-d95e04957c14
|
NewEventList
|
['Event', 'EventList']
|
github.com/openshift/openshift-tests-private/test/extended/mco/events.go
|
func NewEventList(oc *exutil.CLI, namespace string) *EventList {
return &EventList{*NewNamespacedResourceList(oc, "Event", namespace)}
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
35e946b7-2c86-466e-88b2-1e67d13689c5
|
GetAll
|
['Event', 'EventList']
|
github.com/openshift/openshift-tests-private/test/extended/mco/events.go
|
func (el *EventList) GetAll() ([]Event, error) {
el.SortByLastTimestamp()
allEventResources, err := el.ResourceList.GetAll()
if err != nil {
return nil, err
}
allEvents := make([]Event, 0, len(allEventResources))
for _, eventRes := range allEventResources {
allEvents = append(allEvents, *NewEvent(el.oc, eventRes.namespace, eventRes.name))
}
// We want the first element to be the most recent one
allEvents = reverseEventsList(allEvents)
return allEvents, nil
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
e3dbad29-a2e8-43f5-abbb-b2fba122c132
|
SortByLastTimestamp
|
['EventList']
|
github.com/openshift/openshift-tests-private/test/extended/mco/events.go
|
func (el *EventList) SortByLastTimestamp() {
el.ResourceList.SortBy("lastTimestamp")
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
95114119-eaf9-4d2d-b9c9-e5f5eec40617
|
GetLatest
|
['Event', 'EventList']
|
github.com/openshift/openshift-tests-private/test/extended/mco/events.go
|
func (el *EventList) GetLatest() (*Event, error) {
allEvents, lerr := el.GetAll()
if lerr != nil {
logger.Errorf("Error getting events %s", lerr)
return nil, lerr
}
if len(allEvents) == 0 {
return nil, nil
}
return &(allEvents[0]), nil
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
81275cf4-93cb-4d76-895b-3e88cc88a8b3
|
GetAllEventsSinceEvent
|
['Event', 'EventList']
|
github.com/openshift/openshift-tests-private/test/extended/mco/events.go
|
func (el *EventList) GetAllEventsSinceEvent(sinceEvent *Event) ([]Event, error) {
allEvents, lerr := el.GetAll()
if lerr != nil {
logger.Errorf("Error getting events %s", lerr)
return nil, lerr
}
if sinceEvent == nil {
return allEvents, nil
}
returnEvents := []Event{}
for _, event := range allEvents {
if event.name == sinceEvent.name {
break
}
returnEvents = append(returnEvents, event)
}
return returnEvents, nil
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
9ce745d5-c679-4e53-9797-e23e857cb9d7
|
GetAllSince
|
['"time"']
|
['Event', 'EventList']
|
github.com/openshift/openshift-tests-private/test/extended/mco/events.go
|
func (el EventList) GetAllSince(since time.Time) ([]Event, error) {
// Remove log noise
el.oc.NotShowInfo()
defer el.oc.SetShowInfo()
allEvents, lerr := el.GetAll()
if lerr != nil {
logger.Errorf("Error getting events %s", lerr)
return nil, lerr
}
returnEvents := []Event{}
for _, loopEvent := range allEvents {
event := loopEvent // this is to make sure that we execute defer in all events, and not only in the last one
// Remove log noise
event.oc.NotShowInfo()
defer event.oc.SetShowInfo()
lastOccurrence, err := event.GetLastTimestamp()
if err != nil {
logger.Errorf("Error getting lastTimestamp in event %s/%s. Error: %s", event.GetNamespace(), event.GetName(), err)
continue
}
if lastOccurrence.Before(since) {
break
}
returnEvents = append(returnEvents, event)
}
return returnEvents, nil
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
8e2eaf32-6b76-4bae-b7c7-e208c6cd1600
|
reverseEventsList
|
['Event']
|
github.com/openshift/openshift-tests-private/test/extended/mco/events.go
|
func reverseEventsList(a []Event) []Event {
for i := len(a)/2 - 1; i >= 0; i-- {
opp := len(a) - 1 - i
a[i], a[opp] = a[opp], a[i]
}
return a
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
065364e4-a92b-4ca8-a1f9-64543acd5d18
|
HaveEventsSequence
|
['"github.com/onsi/gomega/types"']
|
['haveEventsSequenceMatcher']
|
github.com/openshift/openshift-tests-private/test/extended/mco/events.go
|
func HaveEventsSequence(sequence ...string) types.GomegaMatcher {
return &haveEventsSequenceMatcher{sequence: sequence}
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
2090aeb8-42c1-4e21-9410-5e37b0cc2c40
|
String
|
['"fmt"']
|
['tmpEvent']
|
github.com/openshift/openshift-tests-private/test/extended/mco/events.go
|
func (t tmpEvent) String() string { return fmt.Sprintf("%s - %s", t.lastTimestamp, t.reason) }
|
mco
| |||
function
|
openshift/openshift-tests-private
|
dc90bf3d-f1c4-427c-8068-aef7f16b693d
|
Len
|
github.com/openshift/openshift-tests-private/test/extended/mco/events.go
|
func (a byLastTimestamp) Len() int { return len(a) }
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
290b7f74-4d07-4281-afa2-dbe8f398720f
|
Swap
|
github.com/openshift/openshift-tests-private/test/extended/mco/events.go
|
func (a byLastTimestamp) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
d1edc686-8fff-471a-9243-140273ab4959
|
Less
|
github.com/openshift/openshift-tests-private/test/extended/mco/events.go
|
func (a byLastTimestamp) Less(i, j int) bool {
return a[i].lastTimestamp.Before(a[j].lastTimestamp)
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
290cb148-6266-425f-8eb8-ec9326adda09
|
Match
|
['"fmt"', '"sort"', '"time"']
|
['Event', 'tmpEvent', 'haveEventsSequenceMatcher']
|
github.com/openshift/openshift-tests-private/test/extended/mco/events.go
|
func (matcher *haveEventsSequenceMatcher) Match(actual interface{}) (success bool, err error) {
logger.Infof("Start verifying events sequence: %s", matcher.sequence)
events, ok := actual.([]Event)
if !ok {
return false, fmt.Errorf("HaveSequence matcher expects a slice of Events in test case %v", g.CurrentSpecReport().FullText())
}
// To avoid too many "oc" executions we store the events information in a cached struct list with "lastTimestamp" and "reason" fields.
tmpEvents := []tmpEvent{}
for _, loopEvent := range events {
event := loopEvent // this is to make sure that we execute defer in all events, and not only in the last one
event.oc.NotShowInfo()
defer event.oc.SetShowInfo()
reason, err := event.Get(`{.reason}`)
if err != nil {
return false, err
}
lastTimestamp, err := event.GetLastTimestamp()
if err != nil {
return false, err
}
tmpEvents = append(tmpEvents, tmpEvent{lastTimestamp: lastTimestamp, reason: reason})
}
// We sort the cached list. Oldest event first
sort.Sort(byLastTimestamp(tmpEvents))
// Several events can be created in the same second, hence, we need to take into account
// that 2 events in the same second can match any order.
// If 2 events have the same timestamp
// we consider that the order is right no matter what.
lastEventTime := time.Time{}
for _, seqReason := range matcher.sequence {
found := false
for _, event := range tmpEvents {
if seqReason == event.reason &&
(lastEventTime.Before(event.lastTimestamp) || lastEventTime.Equal(event.lastTimestamp)) {
logger.Infof("Found! %s event in time %s", seqReason, event.lastTimestamp)
lastEventTime = event.lastTimestamp
found = true
break
}
}
// Could not find an event with the sequence's reason. We fail the match
if !found {
logger.Infof("%s event NOT Found after time %s", seqReason, lastEventTime)
return false, nil
}
}
return true, nil
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
2ceeb42a-8f98-4c22-b4ea-6b923ffb9ffe
|
FailureMessage
|
['"fmt"']
|
['Event', 'haveEventsSequenceMatcher']
|
github.com/openshift/openshift-tests-private/test/extended/mco/events.go
|
func (matcher *haveEventsSequenceMatcher) FailureMessage(actual interface{}) (message string) {
// The type was already validated in Match, we can safely ignore the error
events, _ := actual.([]Event)
output := "Expecte events\n"
if len(events) == 0 {
output = "No events in the list\n"
} else {
for _, event := range events {
output += fmt.Sprintf("- %s\n", event)
}
}
output += fmt.Sprintf("to contain this reason sequence\n\t%s\n", matcher.sequence)
return output
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
063b4918-a576-4260-9e98-7825834990da
|
NegatedFailureMessage
|
['"fmt"']
|
['Event', 'haveEventsSequenceMatcher']
|
github.com/openshift/openshift-tests-private/test/extended/mco/events.go
|
func (matcher *haveEventsSequenceMatcher) NegatedFailureMessage(actual interface{}) (message string) {
// The type was already validated in Match, we can safely ignore the error
events, _ := actual.([]Event)
output := "Expecte events\n"
for _, event := range events {
output += fmt.Sprintf("- %s\n", event)
}
output += output + fmt.Sprintf("NOT to contain this reason sequence\n\t%s\n", matcher.sequence)
return output
}
|
mco
| |||
file
|
openshift/openshift-tests-private
|
4e94625b-cab4-4a2f-b538-107f56804acf
|
gomega_matchers
|
import (
"encoding/json"
"fmt"
g "github.com/onsi/ginkgo/v2"
gomegamatchers "github.com/onsi/gomega/matchers"
"github.com/onsi/gomega/types"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
)
|
github.com/openshift/openshift-tests-private/test/extended/mco/gomega_matchers.go
|
package mco
import (
"encoding/json"
"fmt"
g "github.com/onsi/ginkgo/v2"
gomegamatchers "github.com/onsi/gomega/matchers"
"github.com/onsi/gomega/types"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
)
// struct implementing the gomega matcher interface
type conditionMatcher struct {
conditionType string
field string
expected interface{}
value string
expectedMatcher types.GomegaMatcher
currentCondition string // stores the current condition being checked, so that it can be displayed in the error message if the check fails
}
// Match checks if the condition with the given type has the right value in the given field.
func (matcher *conditionMatcher) Match(actual interface{}) (success bool, err error) {
// Check that the checked value is a Resource
resource, ok := actual.(ResourceInterface)
if !ok {
logger.Errorf("Wrong type. Matcher expects a type implementing 'ResourceInterface'")
return false, fmt.Errorf(`Wrong type. Matcher expects a type "ResourceInterface" in test case %v`, g.CurrentSpecReport().FullText())
}
// Extract the value of the condition that we want to check
matcher.currentCondition, err = resource.Get(`{.status.conditions[?(@.type=="` + matcher.conditionType + `")]}`)
if err != nil {
return false, err
}
if matcher.currentCondition == "" {
return false, fmt.Errorf(`Condition type "%s" cannot be found in resource %s in test case %v`, matcher.conditionType, resource, g.CurrentSpecReport().FullText())
}
var conditionMap map[string]string
jsonerr := json.Unmarshal([]byte(matcher.currentCondition), &conditionMap)
if jsonerr != nil {
return false, jsonerr
}
matcher.value, ok = conditionMap[matcher.field]
if !ok {
return false, fmt.Errorf(`Condition field "%s" cannot be found in condition %s for resource %s in test case %v`,
matcher.field, matcher.conditionType, resource, g.CurrentSpecReport().FullText())
}
logger.Infof("Value: %s", matcher.value)
// Guess if we provided a value or another matcher in order to check the condition
var isMatcher bool
matcher.expectedMatcher, isMatcher = matcher.expected.(types.GomegaMatcher)
if !isMatcher {
matcher.expectedMatcher = &gomegamatchers.EqualMatcher{Expected: matcher.expected}
}
return matcher.expectedMatcher.Match(matcher.value)
}
// FailureMessage returns the message shown when the match fails
func (matcher *conditionMatcher) FailureMessage(actual interface{}) (message string) {
// The type was already validated in Match, we can safely ignore the error
resource, _ := actual.(ResourceInterface)
message = fmt.Sprintf("In resource %s, the following condition field '%s.%s' failed to satisfy matcher.\n%s\n", resource,
matcher.conditionType, matcher.field, matcher.expectedMatcher.FailureMessage(matcher.value))
message += matcher.currentCondition
return message
}
// NegatedFailureMessage returns the message shown when a negated match fails
func (matcher *conditionMatcher) NegatedFailureMessage(actual interface{}) (message string) {
// The type was already validated in Match, we can safely ignore the error
resource, _ := actual.(ResourceInterface)
message = fmt.Sprintf("In resource %s, the following condition field '%s.%s' failed satisified matcher, but it shouldn't:\n%s\n", resource,
matcher.conditionType, matcher.field, matcher.expectedMatcher.NegatedFailureMessage(matcher.value))
message += matcher.currentCondition
return message
}
// HaveConditionField returns the gomega matcher to check if a resource's given condition field is matching the expected value
func HaveConditionField(conditionType, conditionField string, expected interface{}) types.GomegaMatcher {
return &conditionMatcher{conditionType: conditionType, field: conditionField, expected: expected}
}
// HaveNodeDegradedMessage returns the gomega matcher to check if a resource is reporting the given degraded message
func HaveNodeDegradedMessage(expected interface{}) types.GomegaMatcher {
return &conditionMatcher{conditionType: "NodeDegraded", field: "message", expected: expected}
}
// HaveDegradedMessage returns the gomega matcher to check if a resource is reporting the given node degraded message
func HaveDegradedMessage(expected interface{}) types.GomegaMatcher {
return &conditionMatcher{conditionType: "Degraded", field: "message", expected: expected}
}
// HaveAvailableMessage returns the gomega matcher to check if a resource is reporting the given node available message
func HaveAvailableMessage(expected interface{}) types.GomegaMatcher {
return &conditionMatcher{conditionType: "Available", field: "message", expected: expected}
}
// DegradedMatcher struct implementing the gomega matcher interface to check the Degraded condition
type DegradedMatcher struct {
*conditionMatcher
}
// FailureMessage returns the message shown when the match fails
func (matcher *DegradedMatcher) FailureMessage(actual interface{}) (message string) {
// The type was already validated in Match, we can safely ignore the error
resource, _ := actual.(ResourceInterface)
message = fmt.Sprintf("Resource %s is NOT Degraded but it should.\n%s condition: %s\n", resource, matcher.conditionType, matcher.currentCondition)
message += matcher.expectedMatcher.FailureMessage(matcher.value)
return message
}
// NegatedFailureMessage returns the message shown when a negated match fails
func (matcher *DegradedMatcher) NegatedFailureMessage(actual interface{}) (message string) {
// The type was already validated in Match, we can safely ignore the error
resource, _ := actual.(ResourceInterface)
message = fmt.Sprintf("Resource %s is Degraded but it should not.\n%s condition: %s", resource, matcher.conditionType, matcher.currentCondition)
message += matcher.expectedMatcher.NegatedFailureMessage(matcher.value)
return message
}
// BeDegraded returns the gomega matcher to check if a resource is degraded or not.
func BeDegraded() types.GomegaMatcher {
return &DegradedMatcher{&conditionMatcher{conditionType: "Degraded", field: "status", expected: "True"}}
}
// AvailableMatcher struct implementing the gomega matcher interface to check the "Available" condition
type AvailableMatcher struct {
*conditionMatcher
}
// FailureMessage returns the message shown when the match fails
func (matcher *AvailableMatcher) FailureMessage(actual interface{}) (message string) {
// The type was already validated in Match, we can safely ignore the error
resource, _ := actual.(ResourceInterface)
message = fmt.Sprintf("Resource %s is NOT Available but it should.\n%s condition: %s\n", resource, matcher.conditionType, matcher.currentCondition)
message += matcher.expectedMatcher.FailureMessage(matcher.value)
return message
}
// NegatedFailureMessage returns the message shown when a negated match fails
func (matcher *AvailableMatcher) NegatedFailureMessage(actual interface{}) (message string) {
// The type was already validated in Match, we can safely ignore the error
resource, _ := actual.(ResourceInterface)
message = fmt.Sprintf("Resource %s is Available but it should not.\n%s condition: %s", resource, matcher.conditionType, matcher.currentCondition)
message += matcher.expectedMatcher.NegatedFailureMessage(matcher.value)
return message
}
// BeAvailable returns the gomega matcher to check if a resource is available or not.
func BeAvailable() types.GomegaMatcher {
return &DegradedMatcher{&conditionMatcher{conditionType: "Available", field: "status", expected: "True"}}
}
|
package mco
| ||||
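A hedged usage sketch for the condition matchers above, assuming gomega is aliased as o and that mcp is any wrapper implementing ResourceInterface (for example a MachineConfigPool resource from this package); the "Updated" condition type is illustrative.

// Hypothetical snippet (package mco): assert on resource conditions through the custom matchers.
o.Expect(mcp).NotTo(BeDegraded(), "the pool should not be degraded after the update")
o.Expect(mcp).To(BeAvailable())
o.Expect(mcp).To(HaveConditionField("Updated", "status", "True")) // condition type is illustrative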
function
|
openshift/openshift-tests-private
|
f347061f-d1be-44d6-9f6e-7cefbae040cb
|
Match
|
['"encoding/json"', '"fmt"', '"github.com/onsi/gomega/types"']
|
['conditionMatcher']
|
github.com/openshift/openshift-tests-private/test/extended/mco/gomega_matchers.go
|
func (matcher *conditionMatcher) Match(actual interface{}) (success bool, err error) {
// Check that the checked value is a Resource
resource, ok := actual.(ResourceInterface)
if !ok {
logger.Errorf("Wrong type. Matcher expects a type implementing 'ResourceInterface'")
return false, fmt.Errorf(`Wrong type. Matcher expects a type "ResourceInterface" in test case %v`, g.CurrentSpecReport().FullText())
}
// Extract the value of the condition that we want to check
matcher.currentCondition, err = resource.Get(`{.status.conditions[?(@.type=="` + matcher.conditionType + `")]}`)
if err != nil {
return false, err
}
if matcher.currentCondition == "" {
return false, fmt.Errorf(`Condition type "%s" cannot be found in resource %s in test case %v`, matcher.conditionType, resource, g.CurrentSpecReport().FullText())
}
var conditionMap map[string]string
jsonerr := json.Unmarshal([]byte(matcher.currentCondition), &conditionMap)
if jsonerr != nil {
return false, jsonerr
}
matcher.value, ok = conditionMap[matcher.field]
if !ok {
return false, fmt.Errorf(`Condition field "%s" cannot be found in condition %s for resource %s in test case %v`,
matcher.field, matcher.conditionType, resource, g.CurrentSpecReport().FullText())
}
logger.Infof("Value: %s", matcher.value)
// Guess if we provided a value or another matcher in order to check the condition
var isMatcher bool
matcher.expectedMatcher, isMatcher = matcher.expected.(types.GomegaMatcher)
if !isMatcher {
matcher.expectedMatcher = &gomegamatchers.EqualMatcher{Expected: matcher.expected}
}
return matcher.expectedMatcher.Match(matcher.value)
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
2ae09d4d-1e99-424d-bcc0-62c246a2f772
|
FailureMessage
|
['"fmt"']
|
['conditionMatcher']
|
github.com/openshift/openshift-tests-private/test/extended/mco/gomega_matchers.go
|
func (matcher *conditionMatcher) FailureMessage(actual interface{}) (message string) {
// The type was already validated in Match, we can safely ignore the error
resource, _ := actual.(ResourceInterface)
message = fmt.Sprintf("In resource %s, the following condition field '%s.%s' failed to satisfy matcher.\n%s\n", resource,
matcher.conditionType, matcher.field, matcher.expectedMatcher.FailureMessage(matcher.value))
message += matcher.currentCondition
return message
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
13f0b9d5-ebb4-43b1-a175-d5be2b6b822b
|
NegatedFailureMessage
|
['"fmt"']
|
['conditionMatcher']
|
github.com/openshift/openshift-tests-private/test/extended/mco/gomega_matchers.go
|
func (matcher *conditionMatcher) NegatedFailureMessage(actual interface{}) (message string) {
// The type was already validated in Match, we can safely ignore the error
resource, _ := actual.(ResourceInterface)
message = fmt.Sprintf("In resource %s, the following condition field '%s.%s' failed satisified matcher, but it shouldn't:\n%s\n", resource,
matcher.conditionType, matcher.field, matcher.expectedMatcher.NegatedFailureMessage(matcher.value))
message += matcher.currentCondition
return message
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
e6daf9aa-4ba1-4239-aad6-b9942f16c9d5
|
HaveConditionField
|
['"github.com/onsi/gomega/types"']
|
['conditionMatcher']
|
github.com/openshift/openshift-tests-private/test/extended/mco/gomega_matchers.go
|
func HaveConditionField(conditionType, conditionField string, expected interface{}) types.GomegaMatcher {
return &conditionMatcher{conditionType: conditionType, field: conditionField, expected: expected}
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
d6102577-e2d1-4c35-a2a6-7b0778cd384c
|
HaveNodeDegradedMessage
|
['"github.com/onsi/gomega/types"']
|
['conditionMatcher']
|
github.com/openshift/openshift-tests-private/test/extended/mco/gomega_matchers.go
|
func HaveNodeDegradedMessage(expected interface{}) types.GomegaMatcher {
return &conditionMatcher{conditionType: "NodeDegraded", field: "message", expected: expected}
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
99fc97cc-8710-46a6-9873-b25d29dd794d
|
HaveDegradedMessage
|
['"github.com/onsi/gomega/types"']
|
['conditionMatcher']
|
github.com/openshift/openshift-tests-private/test/extended/mco/gomega_matchers.go
|
func HaveDegradedMessage(expected interface{}) types.GomegaMatcher {
return &conditionMatcher{conditionType: "Degraded", field: "message", expected: expected}
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
1b60de62-74af-4768-9829-1730f783442b
|
HaveAvailableMessage
|
['"github.com/onsi/gomega/types"']
|
['conditionMatcher']
|
github.com/openshift/openshift-tests-private/test/extended/mco/gomega_matchers.go
|
func HaveAvailableMessage(expected interface{}) types.GomegaMatcher {
return &conditionMatcher{conditionType: "Available", field: "message", expected: expected}
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
73275324-e9cc-4587-91b1-7556bbd0494c
|
FailureMessage
|
['"fmt"']
|
['DegradedMatcher']
|
github.com/openshift/openshift-tests-private/test/extended/mco/gomega_matchers.go
|
func (matcher *DegradedMatcher) FailureMessage(actual interface{}) (message string) {
// The type was already validated in Match, we can safely ignore the error
resource, _ := actual.(ResourceInterface)
message = fmt.Sprintf("Resource %s is NOT Degraded but it should.\n%s condition: %s\n", resource, matcher.conditionType, matcher.currentCondition)
message += matcher.expectedMatcher.FailureMessage(matcher.value)
return message
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
5ed1057f-7dd0-457e-8d62-6dfec00ee67c
|
NegatedFailureMessage
|
['"fmt"']
|
['DegradedMatcher']
|
github.com/openshift/openshift-tests-private/test/extended/mco/gomega_matchers.go
|
func (matcher *DegradedMatcher) NegatedFailureMessage(actual interface{}) (message string) {
// The type was already validated in Match, we can safely ignore the error
resource, _ := actual.(ResourceInterface)
message = fmt.Sprintf("Resource %s is Degraded but it should not.\n%s condition: %s", resource, matcher.conditionType, matcher.currentCondition)
message += matcher.expectedMatcher.NegatedFailureMessage(matcher.value)
return message
}
|
mco
|