element_type (stringclasses, 4 values) | project_name (stringclasses, 1 value) | uuid (stringlengths, 36-36) | name (stringlengths, 0-346) | imports (stringlengths, 0-2.67k) | structs (stringclasses, 761 values) | interfaces (stringclasses, 22 values) | file_location (stringclasses, 545 values) | code (stringlengths, 26-8.07M) | global_vars (stringclasses, 7 values) | package (stringclasses, 124 values) | tags (stringclasses, 1 value)
---|---|---|---|---|---|---|---|---|---|---|---|
test case
|
openshift/openshift-tests-private
|
9aee96db-a26d-43f0-9c00-79e21a5f76fd
|
Author:tagao-High-60485-check On/Off switch of netdev Collector in Node Exporter [Serial]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-High-60485-check On/Off switch of netdev Collector in Node Exporter [Serial]", func() {
var (
disableNetdev = filepath.Join(monitoringBaseDir, "disableNetdev.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("check netdev Collector is enabled by default")
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("--collector.netdev"))
exutil.By("check netdev metrics in prometheus k8s pod")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="netdev"}'`, token, `"collector":"netdev"`, uwmLoadTime)
exutil.By("disable netdev in CMO")
createResourceFromYaml(oc, "openshift-monitoring", disableNetdev)
exutil.By("check netdev metrics in prometheus k8s pod again, should not have related metrics")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="netdev"}'`, token, `"result":[]`, 3*uwmLoadTime)
exutil.By("check netdev in daemonset")
output2, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output2).To(o.ContainSubstring("--no-collector.netdev"))
})
| |||||
test case
|
openshift/openshift-tests-private
|
534ac5ef-f674-432a-ab89-0f7bb845fb60
|
Author:tagao-High-59521-check On/Off switch of cpufreq Collector in Node Exporter [Serial]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-High-59521-check On/Off switch of cpufreq Collector in Node Exporter [Serial]", func() {
var (
enableCpufreq = filepath.Join(monitoringBaseDir, "enableCpufreq.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("check cpufreq Collector is disabled by default")
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("--no-collector.cpufreq"))
exutil.By("check cpufreq metrics in prometheus k8s pod, should not have related metrics")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="cpufreq"}'`, token, `"result":[]`, uwmLoadTime)
exutil.By("enable cpufreq in CMO")
createResourceFromYaml(oc, "openshift-monitoring", enableCpufreq)
exutil.By("check cpufreq metrics in prometheus k8s pod again")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="cpufreq"}'`, token, `"collector":"cpufreq"`, 3*uwmLoadTime)
exutil.By("check cpufreq in daemonset")
output2, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output2).To(o.ContainSubstring("--collector.cpufreq"))
})
| |||||
test case
|
openshift/openshift-tests-private
|
004ba544-cdf2-49ed-8503-69024f5d22b0
|
Author:tagao-High-60480-check On/Off switch of tcpstat Collector in Node Exporter [Serial]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-High-60480-check On/Off switch of tcpstat Collector in Node Exporter [Serial]", func() {
var (
enableTcpstat = filepath.Join(monitoringBaseDir, "enableTcpstat.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("check tcpstat Collector is disabled by default")
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("--no-collector.tcpstat"))
exutil.By("check tcpstat metrics in prometheus k8s pod, should not have related metrics")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="tcpstat"}'`, token, `"result":[]`, uwmLoadTime)
exutil.By("enable tcpstat in CMO")
createResourceFromYaml(oc, "openshift-monitoring", enableTcpstat)
exutil.By("check tcpstat metrics in prometheus k8s pod again")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="tcpstat"}'`, token, `"collector":"tcpstat"`, 3*uwmLoadTime)
exutil.By("check tcpstat in daemonset")
output2, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output2).To(o.ContainSubstring("--collector.tcpstat"))
})
| |||||
test case
|
openshift/openshift-tests-private
|
1307a26d-5d56-455a-abc1-e870630dff54
|
Author:tagao-High-60582-check On/Off switch of buddyinfo Collector in Node Exporter [Serial]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-High-60582-check On/Off switch of buddyinfo Collector in Node Exporter [Serial]", func() {
var (
enableBuddyinfo = filepath.Join(monitoringBaseDir, "enableBuddyinfo.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("check buddyinfo Collector is disabled by default")
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("--no-collector.buddyinfo"))
exutil.By("check buddyinfo metrics in prometheus k8s pod, should not have related metrics")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="buddyinfo"}'`, token, `"result":[]`, uwmLoadTime)
exutil.By("enable buddyinfo in CMO")
createResourceFromYaml(oc, "openshift-monitoring", enableBuddyinfo)
exutil.By("check buddyinfo metrics in prometheus k8s pod again")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="buddyinfo"}'`, token, `"collector":"buddyinfo"`, 3*uwmLoadTime)
exutil.By("check buddyinfo in daemonset")
output2, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output2).To(o.ContainSubstring("--collector.buddyinfo"))
})
| |||||
test case
|
openshift/openshift-tests-private
|
1896f554-fdd9-494c-b9c0-270b27bceec6
|
Author:juzhao-Medium-59986-Allow to configure secrets in alertmanager component [Serial]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:juzhao-Medium-59986-Allow to configure secrets in alertmanager component [Serial]", func() {
var (
alertmanagerSecret = filepath.Join(monitoringBaseDir, "alertmanager-secret.yaml")
alertmanagerSecretCM = filepath.Join(monitoringBaseDir, "alertmanager-secret-cm.yaml")
alertmanagerSecretUwmCM = filepath.Join(monitoringBaseDir, "alertmanager-secret-uwm-cm.yaml")
)
exutil.By("delete secrets/user-workload-monitoring-config/cluster-monitoring-config configmap at the end of a serial case")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("secret", "test-secret", "-n", "openshift-monitoring").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("secret", "slack-api-token", "-n", "openshift-monitoring").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("secret", "test-secret", "-n", "openshift-user-workload-monitoring").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("secret", "slack-api-token", "-n", "openshift-user-workload-monitoring").Execute()
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("create alertmanager secret in openshift-monitoring")
createResourceFromYaml(oc, "openshift-monitoring", alertmanagerSecret)
exutil.By("enabled UWM and configure alertmanager secret setting in cluster-monitoring-config configmap")
createResourceFromYaml(oc, "openshift-monitoring", alertmanagerSecretCM)
exutil.By("check if the secrets are mounted to alertmanager pod")
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
checkConfigInPod(oc, "openshift-monitoring", "alertmanager-main-0", "alertmanager", "ls /etc/alertmanager/secrets/", "test-secret")
checkConfigInPod(oc, "openshift-monitoring", "alertmanager-main-0", "alertmanager", "ls /etc/alertmanager/secrets/", "slack-api-token")
exutil.By("create the same alertmanager secret in openshift-user-workload-monitoring")
createResourceFromYaml(oc, "openshift-user-workload-monitoring", alertmanagerSecret)
exutil.By("configure alertmanager secret setting in user-workload-monitoring-config configmap")
createResourceFromYaml(oc, "openshift-user-workload-monitoring", alertmanagerSecretUwmCM)
exutil.By("check if the secrets are mounted to UWM alertmanager pod")
exutil.AssertAllPodsToBeReady(oc, "openshift-user-workload-monitoring")
checkConfigInPod(oc, "openshift-user-workload-monitoring", "alertmanager-user-workload-0", "alertmanager", "ls /etc/alertmanager/secrets/", "test-secret")
checkConfigInPod(oc, "openshift-user-workload-monitoring", "alertmanager-user-workload-0", "alertmanager", "ls /etc/alertmanager/secrets/", "slack-api-token")
})
| |||||
test case
|
openshift/openshift-tests-private
|
58b81730-c747-4d79-8bdb-75823b589d7e
|
Author:juzhao-Medium-60532-TechPreview feature is not enabled and collectionProfile is set to valid value [Serial]
|
['"path/filepath"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:juzhao-Medium-60532-TechPreview feature is not enabled and collectionProfile is set to valid value [Serial]", func() {
var (
collectionProfileminimal = filepath.Join(monitoringBaseDir, "collectionProfile_minimal.yaml")
)
exutil.By("delete user-workload-monitoring-config/cluster-monitoring-config configmap at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("skip the case in TechPreview feature enabled cluster")
featureSet, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("FeatureGate/cluster", "-ojsonpath={.spec}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("featureSet is: %s", featureSet)
if featureSet != "{}" && strings.Contains(featureSet, "TechPreviewNoUpgrade") {
g.Skip("This case is not suitable for TechPreview enabled cluster!")
}
exutil.By("set collectionProfile to minimal in cluster-monitoring-config configmap")
createResourceFromYaml(oc, "openshift-monitoring", collectionProfileminimal)
exutil.By("should see error in CMO logs which indicate collectionProfiles is a TechPreview feature")
CMOPodName, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-monitoring", "-l", "app.kubernetes.io/name=cluster-monitoring-operator", "-ojsonpath={.items[].metadata.name}").Output()
checkLogsInContainer(oc, "openshift-monitoring", CMOPodName, "cluster-monitoring-operator", "collectionProfiles is currently a TechPreview feature")
})
| |||||
test case
|
openshift/openshift-tests-private
|
f1cd3548-ddd3-4c2b-b84b-3f3d85a9cf7b
|
Author:tagao-Low-60534-check gomaxprocs setting of Node Exporter in CMO [Serial]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Low-60534-check gomaxprocs setting of Node Exporter in CMO [Serial]", func() {
var (
setGomaxprocsTo1 = filepath.Join(monitoringBaseDir, "setGomaxprocsTo1.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("check default gomaxprocs value is 0")
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset", "node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("--runtime.gomaxprocs=0"))
exutil.By("set gomaxprocs value to 1")
createResourceFromYaml(oc, "openshift-monitoring", setGomaxprocsTo1)
exutil.By("check gomaxprocs value in daemonset")
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
cmd := "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}"
checkYamlconfig(oc, "openshift-monitoring", "daemonset", "node-exporter", cmd, "--runtime.gomaxprocs=1", true)
})
| |||||
test case
|
openshift/openshift-tests-private
|
772629f6-3411-4010-991a-e77af397c67c
|
Author:tagao-High-60486-check On/Off switch of netclass Collector and netlink backend in Node Exporter [Serial]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-High-60486-check On/Off switch of netclass Collector and netlink backend in Node Exporter [Serial]", func() {
var (
disableNetclass = filepath.Join(monitoringBaseDir, "disableNetclass.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("check netclass Collector is enabled by default, so as netlink")
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
//oc -n openshift-monitoring get daemonset.apps/node-exporter -ojsonpath='{.spec.template.spec.containers[?(@.name=="node-exporter")].args}'
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("--collector.netclass"))
o.Expect(output).To(o.ContainSubstring("--collector.netclass.netlink"))
exutil.By("check netclass metrics in prometheus k8s pod")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="netclass"}'`, token, `"collector":"netclass"`, uwmLoadTime)
exutil.By("disable netclass in CMO")
createResourceFromYaml(oc, "openshift-monitoring", disableNetclass)
exutil.By("check netclass metrics in prometheus k8s pod again, should not have related metrics")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="netclass"}'`, token, `"result":[]`, 3*uwmLoadTime)
exutil.By("check netclass/netlink in daemonset")
output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("--no-collector.netclass"))
o.Expect(output).NotTo(o.ContainSubstring("--collector.netclass.netlink"))
})
| |||||
test case
|
openshift/openshift-tests-private
|
dc9aa13e-8cbb-45c6-bdf5-1bd715aebe9f
|
Author:tagao-High-63659-check On/Off switch of ksmd Collector in Node Exporter [Serial]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-High-63659-check On/Off switch of ksmd Collector in Node Exporter [Serial]", func() {
var (
enableKsmd = filepath.Join(monitoringBaseDir, "enableKsmd.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("check ksmd Collector is disabled by default")
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("--no-collector.ksmd"))
exutil.By("check ksmd metrics in prometheus k8s pod, should not have related metrics")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="ksmd"}'`, token, `"result":[]`, uwmLoadTime)
exutil.By("enable ksmd in CMO")
createResourceFromYaml(oc, "openshift-monitoring", enableKsmd)
exutil.By("check ksmd metrics in prometheus k8s pod again")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="ksmd"}'`, token, `"collector":"ksmd"`, 3*uwmLoadTime)
exutil.By("check ksmd in daemonset")
output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("--collector.ksmd"))
})
| |||||
test case
|
openshift/openshift-tests-private
|
dca13495-b2f7-48e1-827a-90f2ecb73bdf
|
Author:tagao-LEVEL0-High-64537-CMO deploys monitoring console-plugin [Serial]
|
['"path/filepath"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-LEVEL0-High-64537-CMO deploys monitoring console-plugin [Serial]", func() {
var (
monitoringPluginConfig = filepath.Join(monitoringBaseDir, "monitoringPlugin-config.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("skip the case if console CO is absent")
checkCO, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if !strings.Contains(checkCO, "console") {
g.Skip("This case is not executable when console CO is absent")
}
exutil.By("apply monitoringPlugin config and check config applied")
createResourceFromYaml(oc, "openshift-monitoring", monitoringPluginConfig)
//check new config takes effect
cmd := "-ojsonpath={.spec.template.spec.containers[].resources}"
checkYamlconfig(oc, "openshift-monitoring", "deployment", "monitoring-plugin", cmd, `{"limits":{"cpu":"30m","memory":"120Mi"},"requests":{"cpu":"15m","memory":"60Mi"}}`, true)
exutil.By("check monitoring-plugin ConsolePlugin/PodDisruptionBudget/ServiceAccount/Service are exist")
resourceNames := []string{"ConsolePlugin", "ServiceAccount", "Service"}
for _, resource := range resourceNames {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(resource, "monitoring-plugin", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("monitoring-plugin"))
o.Expect(err).NotTo(o.HaveOccurred())
}
// SNO clusters do not have a PDB under openshift-monitoring
// hypershift-hosted clusters do not have master nodes
checkPodDisruptionBudgetIfNotSNO(oc)
exutil.By("check monitoring-plugin pods are ready")
getReadyPodsWithLabels(oc, "openshift-monitoring", "app.kubernetes.io/component=monitoring-plugin")
exutil.By("get monitoring-plugin pod name")
monitoringPluginPodNames, err := getAllRunningPodsWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/component=monitoring-plugin")
o.Expect(err).NotTo(o.HaveOccurred())
getDeploymentReplicas(oc, "openshift-monitoring", "monitoring-plugin")
waitForPodsToMatchReplicas(oc, "openshift-monitoring", "monitoring-plugin", "app.kubernetes.io/component=monitoring-plugin")
exutil.By("check monitoring-plugin pod config")
e2e.Logf("monitoringPluginPodNames: %v", monitoringPluginPodNames)
for _, pod := range monitoringPluginPodNames {
exutil.AssertPodToBeReady(oc, pod, "openshift-monitoring")
cmd := "-ojsonpath={.spec.nodeSelector}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, `{"node-role.kubernetes.io/worker":""}`, true)
cmd = "-ojsonpath={.spec.topologySpreadConstraints}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, `{"maxSkew":1,"topologyKey":"kubernetes.io/hostname","whenUnsatisfiable":"DoNotSchedule"}`, true)
cmd = "-ojsonpath={.spec.tolerations}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, `{"operator":"Exists"}`, true)
cmd = "-ojsonpath={.spec.containers[].resources}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, `"requests":{"cpu":"15m","memory":"60Mi"}`, true)
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, `"limits":{"cpu":"30m","memory":"120Mi"}`, true)
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
e50ad325-bad3-4686-9a5b-06d83415c106
|
Author:tagao-High-63657-check On/Off switch of systemd Collector in Node Exporter [Serial]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-High-63657-check On/Off switch of systemd Collector in Node Exporter [Serial]", func() {
var (
enableSystemdUnits = filepath.Join(monitoringBaseDir, "enableSystemdUnits.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("check systemd Collector is disabled by default")
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("--no-collector.systemd"))
exutil.By("check systemd metrics in prometheus k8s pod, should not have related metrics")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="systemd"}'`, token, `"result":[]`, uwmLoadTime)
exutil.By("enable systemd and units in CMO")
createResourceFromYaml(oc, "openshift-monitoring", enableSystemdUnits)
exutil.By("check systemd related metrics in prometheus k8s pod again")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="systemd"}'`, token, `"collector":"systemd"`, 3*uwmLoadTime)
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_systemd_system_running'`, token, `"node_systemd_system_running"`, 3*uwmLoadTime)
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_systemd_timer_last_trigger_seconds'`, token, `"node_systemd_timer_last_trigger_seconds"`, 3*uwmLoadTime)
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_systemd_units'`, token, `"node_systemd_units"`, 3*uwmLoadTime)
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_systemd_version'`, token, `"node_systemd_version"`, 3*uwmLoadTime)
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_systemd_unit_state'`, token, `"node_systemd_unit_state"`, 3*uwmLoadTime)
exutil.By("check systemd in daemonset")
output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("--collector.systemd"))
o.Expect(output).To(o.ContainSubstring("--collector.systemd.unit-include=^(network.+|nss.+|logrotate.timer)$"))
})
| |||||
test case
|
openshift/openshift-tests-private
|
67f20fc6-9e2f-492f-8f3c-fda8c8b72dbc
|
Author:tagao-High-63658-check On/Off switch of mountstats Collector in Node Exporter [Serial]
|
['"path/filepath"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-High-63658-check On/Off switch of mountstats Collector in Node Exporter [Serial]", func() {
var (
enableMountstats = filepath.Join(monitoringBaseDir, "enableMountstats.yaml")
enableMountstatsNFS = filepath.Join(monitoringBaseDir, "enableMountstats_nfs.yaml")
)
exutil.By("delete uwm-config/cm-config and pvcs at the end of the case")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pvc", "-l", "app.kubernetes.io/name=prometheus", "-n", "openshift-monitoring").Execute()
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("check mountstats collector is disabled by default")
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("--no-collector.mountstats"))
exutil.By("check mountstats metrics in prometheus k8s pod, should not have related metrics")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="mountstats"}'`, token, `"result":[]`, uwmLoadTime)
exutil.By("enable mountstats in CMO")
createResourceFromYaml(oc, "openshift-monitoring", enableMountstats)
exutil.By("check mountstats metrics in prometheus k8s pod again")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="mountstats"}'`, token, `"collector":"mountstats"`, 3*uwmLoadTime)
exutil.By("check mountstats in daemonset")
output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("--collector.mountstats"))
exutil.By("check nfs metrics if need")
output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("sc").Output()
if strings.Contains(output, "nfs") {
createResourceFromYaml(oc, "openshift-monitoring", enableMountstatsNFS)
exutil.AssertPodToBeReady(oc, "prometheus-k8s-0", "openshift-monitoring")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_mountstats_nfs_read_bytes_total'`, token, `"__name__":"node_mountstats_nfs_read_bytes_total"`, 3*uwmLoadTime)
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_mountstats_nfs_write_bytes_total'`, token, `"__name__":"node_mountstats_nfs_write_bytes_total"`, 3*uwmLoadTime)
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_mountstats_nfs_operations_requests_total'`, token, `"__name__":"node_mountstats_nfs_operations_requests_total"`, 3*uwmLoadTime)
} else {
e2e.Logf("no need to check nfs metrics for this env")
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
1fc48c47-d264-489a-b537-04bbff8ac266
|
Author:tagao-Medium-64868-netclass/netdev device configuration [Serial]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Medium-64868-netclass/netdev device configuration [Serial]", func() {
var (
ignoredNetworkDevices = filepath.Join(monitoringBaseDir, "ignoredNetworkDevices-lo.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("check netclass/netdev device configuration")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("--collector.netclass.ignored-devices=^(veth.*|[a-f0-9]{15}|enP.*|ovn-k8s-mp[0-9]*|br-ex|br-int|br-ext|br[0-9]*|tun[0-9]*|cali[a-f0-9]*)$"))
o.Expect(output).To(o.ContainSubstring("--collector.netdev.device-exclude=^(veth.*|[a-f0-9]{15}|enP.*|ovn-k8s-mp[0-9]*|br-ex|br-int|br-ext|br[0-9]*|tun[0-9]*|cali[a-f0-9]*)$"))
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("check lo devices exist, and able to see related metrics")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=group by(device) (node_network_info)'`, token, `"device":"lo"`, uwmLoadTime)
exutil.By("modify cm to ignore lo devices")
createResourceFromYaml(oc, "openshift-monitoring", ignoredNetworkDevices)
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
exutil.By("check metrics again, should not see lo device metrics")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_network_info{device="lo"}'`, token, `"result":[]`, 3*uwmLoadTime)
exutil.By("check netclass/netdev device configuration, no lo devices")
output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("--collector.netclass.ignored-devices=^(lo)$"))
o.Expect(output).To(o.ContainSubstring("--collector.netdev.device-exclude=^(lo)$"))
exutil.By("modify cm to ignore all devices")
// % oc -n openshift-monitoring patch cm cluster-monitoring-config -p '{"data": {"config.yaml": "nodeExporter:\n ignoredNetworkDevices: [.*]"}}' --type=merge
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("cm", "cluster-monitoring-config", "-p", `{"data": {"config.yaml": "nodeExporter:\n ignoredNetworkDevices: [.*]"}}`, "--type=merge", "-n", "openshift-monitoring").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check metrics again, should not see all device metrics")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=group by(device) (node_network_info)'`, token, `"result":[]`, 3*uwmLoadTime)
exutil.By("check netclass/netdev device configuration again")
output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("--collector.netclass.ignored-devices=^(.*)$"))
o.Expect(output).To(o.ContainSubstring("--collector.netdev.device-exclude=^(.*)$"))
})
| |||||
test case
|
openshift/openshift-tests-private
|
1ada17a0-97c0-44a2-91ca-c768b946727b
|
Author:tagao-LEVEL0-Medium-64296-disable CORS headers on Thanos querier [Serial]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-LEVEL0-Medium-64296-disable CORS headers on Thanos querier [Serial]", func() {
var (
enableCORS = filepath.Join(monitoringBaseDir, "enableCORS.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("check the default enableCORS value is false")
// oc -n openshift-monitoring get deployments.apps thanos-querier -o jsonpath='{.spec.template.spec.containers[?(@.name=="thanos-query")].args}' |jq
thanosQueryArgs, getArgsErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("deployments/thanos-querier", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"thanos-query\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(getArgsErr).NotTo(o.HaveOccurred(), "Failed to get thanos-query container args definition")
o.Expect(thanosQueryArgs).To(o.ContainSubstring("--web.disable-cors"))
exutil.By("set enableCORS as true")
createResourceFromYaml(oc, "openshift-monitoring", enableCORS)
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
exutil.By("check the config again")
cmd := "-ojsonpath={.spec.template.spec.containers[?(@.name==\"thanos-query\")].args}"
checkYamlconfig(oc, "openshift-monitoring", "deployments", "thanos-querier", cmd, `--web.disable-cors`, false)
})
| |||||
test case
|
openshift/openshift-tests-private
|
a7d958f3-dd8e-4262-8fd7-03d22a44d070
|
Author:tagao-Medium-43106-disable Alertmanager deployment[Serial]
|
['"context"', '"os/exec"', '"path/filepath"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Medium-43106-disable Alertmanager deployment[Serial]", func() {
var (
disableAlertmanager = filepath.Join(monitoringBaseDir, "disableAlertmanager.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("disable alertmanager in CMO config")
createResourceFromYaml(oc, "openshift-monitoring", disableAlertmanager)
exutil.AssertAllPodsToBeReady(oc, "openshift-user-workload-monitoring")
// this step aims to give CMO time to remove the alertmanager resources
exutil.By("confirm alertmanager is down")
checkPodDeleted(oc, "openshift-monitoring", "alertmanager=main", "alertmanager")
exutil.By("check alertmanager resources are removed")
err := wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 90*time.Second, false, func(context.Context) (bool, error) {
resourceNames := []string{"route", "servicemonitor", "serviceaccounts", "statefulset", "services", "endpoints", "alertmanagers", "prometheusrules", "clusterrolebindings", "roles"}
for _, resource := range resourceNames {
output, outputErr := oc.AsAdmin().WithoutNamespace().Run("get").Args(resource, "-n", "openshift-monitoring").Output()
if outputErr != nil || strings.Contains(output, "alertmanager") {
return false, nil
}
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, "one or more alertmanager resources not removed yet")
exutil.By("check on clusterroles")
clusterroles, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterroles", "-l", "app.kubernetes.io/part-of=openshift-monitoring").Output()
o.Expect(clusterroles).NotTo(o.ContainSubstring("alertmanager"))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check on configmaps")
checkCM, _ := exec.Command("bash", "-c", `oc -n openshift-monitoring get cm -l app.kubernetes.io/managed-by=cluster-monitoring-operator | grep alertmanager`).Output()
e2e.Logf("check result is: %v", checkCM)
o.Expect(checkCM).NotTo(o.ContainSubstring("alertmanager-trusted-ca-bundle"))
exutil.By("check on rolebindings")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("rolebindings", "-n", "openshift-monitoring").Output()
o.Expect(output).NotTo(o.ContainSubstring("alertmanager-prometheusk8s"))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("check Watchdog alert exist")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertstate="firing",alertname="Watchdog"}'`, token, `"alertname":"Watchdog"`, uwmLoadTime)
})
| |||||
test case
|
openshift/openshift-tests-private
|
5b7faa02-18d9-4ff1-976b-4636ae312b1a
|
Author:juzhao-Medium-66736-add option to specify resource requests and limits for components [Serial]
|
['"path/filepath"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:juzhao-Medium-66736-add option to specify resource requests and limits for components [Serial]", func() {
var (
clusterResources = filepath.Join(monitoringBaseDir, "cluster_resources.yaml")
uwmResources = filepath.Join(monitoringBaseDir, "uwm_resources.yaml")
)
exutil.By("delete user-workload-monitoring-config/cluster-monitoring-config configmap at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
createResourceFromYaml(oc, "openshift-monitoring", clusterResources)
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
exutil.By("by default there is not resources.limits setting for the components, check the result for kube_pod_container_resource_limits of node-exporter pod to see if the setting loaded to components, same for other components")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=kube_pod_container_resource_limits{container="node-exporter",namespace="openshift-monitoring"}'`, token, `"pod":"node-exporter-`, 3*uwmLoadTime)
exutil.By("check the resources.requests and resources.limits setting loaded to node-exporter daemonset")
// oc -n openshift-monitoring get daemonset node-exporter -o jsonpath='{.spec.template.spec.containers[?(@.name=="node-exporter")].resources.requests}'
result, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].resources.requests}", "-n", "openshift-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get node-exporter container resources.requests setting")
o.Expect(result).To(o.ContainSubstring(`"cpu":"10m","memory":"40Mi"`))
// oc -n openshift-monitoring get daemonset node-exporter -o jsonpath='{.spec.template.spec.containers[?(@.name=="node-exporter")].resources.limits}'
result, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].resources.limits}", "-n", "openshift-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get node-exporter container resources.limits setting")
o.Expect(result).To(o.ContainSubstring(`"cpu":"20m","memory":"100Mi"`))
exutil.By("check the resources.requests and resources.limits take effect for kube-state-metrics")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=kube_pod_container_resource_limits{container="kube-state-metrics",namespace="openshift-monitoring"}'`, token, `"pod":"kube-state-metrics-`, 3*uwmLoadTime)
result, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment/kube-state-metrics", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"kube-state-metrics\")].resources.requests}", "-n", "openshift-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get kube-state-metrics container resources.requests setting")
o.Expect(result).To(o.ContainSubstring(`"cpu":"3m","memory":"100Mi"`))
result, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment/kube-state-metrics", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"kube-state-metrics\")].resources.limits}", "-n", "openshift-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get kube-state-metrics container resources.limits setting")
o.Expect(result).To(o.ContainSubstring(`"cpu":"10m","memory":"200Mi"`))
exutil.By("check the resources.requests and resources.limits take effect for openshift-state-metrics")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=kube_pod_container_resource_limits{container="openshift-state-metrics",namespace="openshift-monitoring"}'`, token, `"pod":"openshift-state-metrics-`, 3*uwmLoadTime)
result, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment/openshift-state-metrics", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"openshift-state-metrics\")].resources.requests}", "-n", "openshift-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get openshift-state-metrics container resources.requests setting")
o.Expect(result).To(o.ContainSubstring(`"cpu":"2m","memory":"40Mi"`))
result, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment/openshift-state-metrics", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"openshift-state-metrics\")].resources.limits}", "-n", "openshift-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get openshift-state-metrics container resources.limits setting")
o.Expect(result).To(o.ContainSubstring(`"cpu":"20m","memory":"100Mi"`))
exutil.By("check the resources.requests and resources.limits take effect for metrics-server")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=kube_pod_container_resource_limits{container="metrics-server",namespace="openshift-monitoring"}'`, token, `"pod":"metrics-server-`, 3*uwmLoadTime)
result, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment/metrics-server", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"metrics-server\")].resources.requests}", "-n", "openshift-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get metrics-server container resources.requests setting")
o.Expect(result).To(o.ContainSubstring(`"cpu":"2m","memory":"80Mi"`))
result, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment/metrics-server", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"metrics-server\")].resources.limits}", "-n", "openshift-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get metrics-server container resources.limits setting")
o.Expect(result).To(o.ContainSubstring(`"cpu":"10m","memory":"100Mi"`))
exutil.By("check the resources.requests and resources.limits take effect for prometheus-operator")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=kube_pod_container_resource_limits{container="prometheus-operator",namespace="openshift-monitoring"}'`, token, `"pod":"prometheus-operator-`, 3*uwmLoadTime)
result, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment/prometheus-operator", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"prometheus-operator\")].resources.requests}", "-n", "openshift-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get prometheus-operator container resources.requests setting")
o.Expect(result).To(o.ContainSubstring(`"cpu":"10m","memory":"200Mi"`))
result, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment/prometheus-operator", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"prometheus-operator\")].resources.limits}", "-n", "openshift-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get prometheus-operator container resources.limits setting")
o.Expect(result).To(o.ContainSubstring(`"cpu":"20m","memory":"300Mi"`))
exutil.By("check the resources.requests and resources.limits take effect for prometheus-operator-admission-webhook")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=kube_pod_container_resource_limits{container="prometheus-operator-admission-webhook",namespace="openshift-monitoring"}'`, token, `"pod":"prometheus-operator-admission-webhook-`, 3*uwmLoadTime)
result, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment/prometheus-operator-admission-webhook", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"prometheus-operator-admission-webhook\")].resources.requests}", "-n", "openshift-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get prometheus-operator-admission-webhook container resources.requests setting")
o.Expect(result).To(o.ContainSubstring(`"cpu":"10m","memory":"50Mi"`))
result, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment/prometheus-operator-admission-webhook", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"prometheus-operator-admission-webhook\")].resources.limits}", "-n", "openshift-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get prometheus-operator-admission-webhook container resources.limits setting")
o.Expect(result).To(o.ContainSubstring(`"cpu":"20m","memory":"100Mi"`))
exutil.By("check the resources.requests and resources.limits take effect for telemeter-client")
telemeterPod, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-l", "app.kubernetes.io/name=telemeter-client", "-n", "openshift-monitoring").Output()
if strings.Contains(telemeterPod, "telemeter-client") {
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=kube_pod_container_resource_limits{container="telemeter-client",namespace="openshift-monitoring"}'`, token, `"pod":"telemeter-client-`, 3*uwmLoadTime)
result, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment/telemeter-client", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"telemeter-client\")].resources.requests}", "-n", "openshift-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get telemeter-client container resources.requests setting")
o.Expect(result).To(o.ContainSubstring(`"cpu":"2m","memory":"50Mi"`))
result, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment/telemeter-client", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"telemeter-client\")].resources.limits}", "-n", "openshift-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get telemeter-client container resources.limits setting")
o.Expect(result).To(o.ContainSubstring(`"cpu":"10m","memory":"100Mi"`))
}
createResourceFromYaml(oc, "openshift-user-workload-monitoring", uwmResources)
exutil.AssertAllPodsToBeReady(oc, "openshift-user-workload-monitoring")
exutil.By("check the resources.requests and resources.limits for uwm prometheus-operator")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=kube_pod_container_resource_limits{container="prometheus-operator",namespace="openshift-user-workload-monitoring"}'`, token, `"pod":"prometheus-operator-`, 3*uwmLoadTime)
result, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment/prometheus-operator", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"prometheus-operator\")].resources.requests}", "-n", "openshift-user-workload-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get UWM prometheus-operator container resources.requests setting")
o.Expect(result).To(o.ContainSubstring(`"cpu":"2m","memory":"20Mi"`))
result, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment/prometheus-operator", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"prometheus-operator\")].resources.limits}", "-n", "openshift-user-workload-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get UWM prometheus-operator container resources.limits setting")
o.Expect(result).To(o.ContainSubstring(`"cpu":"10m","memory":"100Mi"`))
})
| |||||
test case
|
openshift/openshift-tests-private
|
be5bfbef-ae9a-4500-9083-606b79a5f25e
|
Author:tagao-High-67503-check On/Off switch of processes Collector in Node Exporter [Serial]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-High-67503-check On/Off switch of processes Collector in Node Exporter [Serial]", func() {
var (
enableProcesses = filepath.Join(monitoringBaseDir, "enableProcesses.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("check processes Collector is disabled by default")
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("--no-collector.processes"))
exutil.By("check processes metrics in prometheus k8s pod, should not have related metrics")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="processes"}'`, token, `"result":[]`, uwmLoadTime)
exutil.By("enable processes in CMO config")
createResourceFromYaml(oc, "openshift-monitoring", enableProcesses)
exutil.By("check processes metrics in prometheus k8s pod again")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="processes"}'`, token, `"collector":"processes"`, 3*uwmLoadTime)
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_processes_max_processes'`, token, `"__name__":"node_processes_max_processes"`, 3*uwmLoadTime)
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_processes_pids'`, token, `"__name__":"node_processes_pids"`, 3*uwmLoadTime)
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_processes_state'`, token, `"__name__":"node_processes_state"`, 3*uwmLoadTime)
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_processes_threads'`, token, `"__name__":"node_processes_threads"`, 3*uwmLoadTime)
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_processes_threads_state'`, token, `"__name__":"node_processes_threads_state"`, 3*uwmLoadTime)
exutil.By("check processes in daemonset")
output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("--collector.processes"))
})
| |||||
test case
|
openshift/openshift-tests-private
|
e6b463c8-845d-4768-a77c-5a521ae95758
|
Author:tagao-Medium-73009-CMO is correctly forwarding current proxy config to the prometheus operator in remote write configs [Serial]
|
['"path/filepath"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Medium-73009-CMO is correctly forwarding current proxy config to the prometheus operator in remote write configs [Serial]", func() {
var (
remotewriteCM = filepath.Join(monitoringBaseDir, "example-remotewrite-cm.yaml")
)
exutil.By("check cluster proxy")
checkProxy, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("proxy", "cluster", "-ojsonpath={.spec}").Output()
if checkProxy == "{}" || !strings.Contains(checkProxy, `http`) {
g.Skip("This case should execute on a proxy cluster!")
}
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("Create example remotewrite cm under openshift-monitoring")
createResourceFromYaml(oc, "openshift-monitoring", remotewriteCM)
exutil.By("get http and https proxy URL")
httpProxy, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("proxy", "cluster", "-ojsonpath={.spec.httpProxy}").Output()
httpsProxy, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("proxy", "cluster", "-ojsonpath={.spec.httpsProxy}").Output()
e2e.Logf("httpProxy:\n%s", httpProxy)
e2e.Logf("httpsProxy:\n%s", httpsProxy)
exutil.By("check prometheus remoteWrite configs applied")
cmd := "-ojsonpath={.spec.remoteWrite[]}"
checkValue := `"url":"https://test.remotewrite.com/api/write"`
checkYamlconfig(oc, "openshift-monitoring", "prometheuses", "k8s", cmd, checkValue, true)
proxyUrl, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("prometheuses", "k8s", "-ojsonpath={.spec.remoteWrite[].proxyUrl}", "-n", "openshift-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("proxyUrl:\n%s", proxyUrl)
exutil.By("check remoteWrite proxyUrl should be same as cluster proxy")
if strings.Contains(proxyUrl, httpsProxy) {
o.Expect(proxyUrl).NotTo(o.Equal(""))
o.Expect(proxyUrl).To(o.Equal(httpsProxy))
}
if !strings.Contains(proxyUrl, httpsProxy) {
o.Expect(proxyUrl).NotTo(o.Equal(""))
o.Expect(proxyUrl).To(o.Equal(httpProxy))
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
f8e0a382-d657-405f-a95d-e39a84bad142
|
Author:tagao-Medium-73834-trigger PrometheusOperatorRejectedResources alert [Serial]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Medium-73834-trigger PrometheusOperatorRejectedResources alert [Serial]", func() {
var (
PrometheusOperatorRejectedResources = filepath.Join(monitoringBaseDir, "PrometheusOperatorRejectedResources.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of the case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("check the alert exist")
cmd := "-ojsonpath={.spec.groups[].rules[?(@.alert==\"PrometheusOperatorRejectedResources\")]}"
checkYamlconfig(oc, "openshift-monitoring", "prometheusrules", "prometheus-operator-rules", cmd, "PrometheusOperatorRejectedResources", true)
exutil.By("trigger PrometheusOperatorRejectedResources alert")
oc.SetupProject()
ns := oc.Namespace()
createResourceFromYaml(oc, ns, PrometheusOperatorRejectedResources)
exutil.By("check alert metrics")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertname="PrometheusOperatorRejectedResources"}'`, token, `PrometheusOperatorRejectedResources`, 3*uwmLoadTime)
})
| |||||
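Note on case 73834 above: PrometheusOperatorRejectedResources fires when the prometheus-operator refuses to load a managed resource. A hedged sketch of the kind of PrometheusRule that gets rejected — the unbalanced parenthesis makes the expression invalid PromQL (the repo's PrometheusOperatorRejectedResources.yaml may differ):
// Hedged sketch — not necessarily the repo's PrometheusOperatorRejectedResources.yaml.
const rejectedPrometheusRule = `
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: rejected-rule-example
spec:
  groups:
  - name: example
    rules:
    - alert: BrokenAlert
      expr: sum(rate(foo[5m])
`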
test case
|
openshift/openshift-tests-private
|
1d2ef6e8-bb6c-43b1-af86-9514c2029ac4
|
Author:tagao-Medium-73805-trigger PrometheusRuleFailures alert [Serial]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Medium-73805-trigger PrometheusRuleFailures alert [Serial]", func() {
var (
PrometheusRuleFailures = filepath.Join(monitoringBaseDir, "PrometheusRuleFailures.yaml")
)
exutil.By("delete uwm-config/cm-config and test alert at the end of the case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("PrometheusRule", "example-alert", "-n", "openshift-monitoring", "--ignore-not-found").Execute()
exutil.By("check the alert exist")
cmd := "-ojsonpath={.spec.groups[].rules[?(@.alert==\"PrometheusRuleFailures\")]}"
checkYamlconfig(oc, "openshift-monitoring", "prometheusrules", "prometheus-k8s-prometheus-rules", cmd, "PrometheusRuleFailures", true)
exutil.By("trigger PrometheusRuleFailures alert")
createResourceFromYaml(oc, "openshift-monitoring", PrometheusRuleFailures)
exutil.By("check alert metrics")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=sum(irate(container_network_receive_bytes_total{pod!=""}[5m])) BY (pod, interface) + on(pod, interface) group_left(network_name) pod_network_name_info'`, token, `"error":"found duplicate series for the match group`, uwmLoadTime)
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertname="PrometheusRuleFailures"}'`, token, `PrometheusRuleFailures`, 3*uwmLoadTime)
})
| |||||
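Note on case 73805 above: the query checked for the "found duplicate series for the match group" error suggests PrometheusRuleFailures.yaml deploys a rule named example-alert whose expression is syntactically valid but fails at evaluation time, which is what raises PrometheusRuleFailures. A hedged reconstruction:
// Hedged sketch — the shipped PrometheusRuleFailures.yaml may differ in detail.
const failingEvaluationRule = `
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: example-alert
  namespace: openshift-monitoring
spec:
  groups:
  - name: example
    rules:
    - record: pod_network_receive_bytes_with_name
      expr: sum(irate(container_network_receive_bytes_total{pod!=""}[5m])) BY (pod, interface) + on(pod, interface) group_left(network_name) pod_network_name_info
`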
test case
|
openshift/openshift-tests-private
|
e145e59c-4b59-435f-97fd-debf5c3b0b74
|
Author:tagao-Medium-73804-trigger TargetDown alert [Serial]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Medium-73804-trigger TargetDown alert [Serial]", func() {
var (
exampleApp = filepath.Join(monitoringBaseDir, "example-app.yaml")
)
exutil.By("delete uwm-config/cm-config and example-app at the end of the case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("deployment/prometheus-example-app", "service/prometheus-example-app", "servicemonitor/prometheus-example-monitor", "-n", "openshift-monitoring", "--ignore-not-found").Execute()
exutil.By("check the alert exist")
cmd := "-ojsonpath={.spec.groups[].rules[?(@.alert==\"TargetDown\")]}"
checkYamlconfig(oc, "openshift-monitoring", "prometheusrules", "cluster-monitoring-operator-prometheus-rules", cmd, "TargetDown", true)
exutil.By("trigger TargetDown alert")
createResourceFromYaml(oc, "openshift-monitoring", exampleApp)
//% oc patch ServiceMonitor/prometheus-example-monitor -n openshift-monitoring --type json -p '[{"op": "add", "path": "/spec/endpoints/0/scheme", "value": "https"}]'
patchConfig := `[{"op": "add", "path": "/spec/endpoints/0/scheme", "value":"https"}]`
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("servicemonitor", "prometheus-example-monitor", "-p", patchConfig, "--type=json", "-n", "openshift-monitoring").Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By("check alert metrics")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertname="TargetDown",job="prometheus-example-app"}'`, token, `"alertname":"TargetDown"`, 3*uwmLoadTime)
})
| |||||
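Note on case 73804 above: switching the ServiceMonitor endpoint scheme to https while the example app serves plain HTTP makes every scrape fail, so the target is reported down. A hedged sketch of the patched ServiceMonitor; the port name and selector labels are assumptions, not taken from example-app.yaml:
// Hedged sketch — port and selector are assumptions about the example app.
const patchedServiceMonitor = `
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: prometheus-example-monitor
  namespace: openshift-monitoring
spec:
  endpoints:
  - port: web
    scheme: https
  selector:
    matchLabels:
      app: prometheus-example-app
`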
test case
|
openshift/openshift-tests-private
|
a57bf8db-3a74-46b4-8572-3c63009e7146
|
Author:tagao-Medium-74734-Alert for broken Prometheus Kube Service Discovery
|
['"path/filepath"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Medium-74734-Alert for broken Prometheus Kube Service Discovery", func() {
var (
exampleApp = filepath.Join(monitoringBaseDir, "example-app.yaml")
)
exutil.By("confirm the alert existed")
// % oc -n openshift-monitoring get prometheusrules prometheus-k8s-prometheus-rules -ojsonpath='{.spec.groups[].rules[?(@.alert=="PrometheusKubernetesListWatchFailures")]}' |jq
cmd := "-ojsonpath={.spec.groups[].rules[?(@.alert==\"PrometheusKubernetesListWatchFailures\")]}"
checkYamlconfig(oc, "openshift-monitoring", "prometheusrules", "prometheus-k8s-prometheus-rules", cmd, `"alert":"PrometheusKubernetesListWatchFailures"`, true)
exutil.By("create a namespace and deploy example-app")
oc.SetupProject()
ns := oc.Namespace()
createResourceFromYaml(oc, ns, exampleApp)
exutil.By("add label to the namespace")
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("namespace", ns, "openshift.io/cluster-monitoring-").Execute()
err := oc.AsAdmin().WithoutNamespace().Run("label").Args("namespace", ns, "openshift.io/cluster-monitoring=true").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
label, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("namespace", ns, `-ojsonpath={.metadata.labels}`).Output()
e2e.Logf("test namespace labels: \n%v", label)
o.Expect(label).To(o.ContainSubstring(`openshift.io/cluster-monitoring":"true`))
exutil.By("confirm prometheus pod is ready")
assertPodToBeReady(oc, "prometheus-k8s-0", "openshift-monitoring")
exutil.By("confirm thanos-query pod is ready")
//% oc get pod -n openshift-monitoring -l app.kubernetes.io/name=thanos-query
waitErr := oc.AsAdmin().WithoutNamespace().Run("wait").Args("pod", "-l", "app.kubernetes.io/name=thanos-query", "-n", "openshift-monitoring", "--for=condition=Ready", "--timeout=3m").Execute()
o.Expect(waitErr).NotTo(o.HaveOccurred())
// debug log
MONpod, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-monitoring").Output()
e2e.Logf("the MON pods condition: %s", MONpod)
exutil.By("check the alert is triggered")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertname="PrometheusKubernetesListWatchFailures"}'`, token, `"alertname":"PrometheusKubernetesListWatchFailures"`, 3*uwmLoadTime)
exutil.By("check logs in prometheus pod")
checkLogWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/name=prometheus", "prometheus", `cannot list resource \"pods\" in API group \"\" in the namespace \"`+ns+`\"`, true)
})
| |||||
test case
|
openshift/openshift-tests-private
|
e4c72c05-7ffe-4b3d-997d-54d9417cad74
|
Author:tagao-Medium-74311-trigger PrometheusRemoteWriteBehind alert [Serial]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Medium-74311-trigger PrometheusRemoteWriteBehind alert [Serial]", func() {
var (
PrometheusRemoteWriteBehind = filepath.Join(monitoringBaseDir, "PrometheusRemoteWriteBehind.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of the case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("create fake remoteWrite")
createResourceFromYaml(oc, "openshift-monitoring", PrometheusRemoteWriteBehind)
exutil.By("check the alert exist")
cmd := "-ojsonpath={.spec.groups[].rules[?(@.alert==\"PrometheusRemoteWriteBehind\")]}"
checkYamlconfig(oc, "openshift-monitoring", "prometheusrules", "prometheus-k8s-prometheus-rules", cmd, "PrometheusRemoteWriteBehind", true)
exutil.By("check logs in pod")
checkLogWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/name=prometheus", "prometheus", "no such host", true)
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("check alert triggered")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertname="PrometheusRemoteWriteBehind"}'`, token, `"alertname":"PrometheusRemoteWriteBehind"`, 2*uwmLoadTime)
})
| |||||
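Note on case 74311 above: the "no such host" log check implies the fake remote write target simply does not resolve, so samples back up and PrometheusRemoteWriteBehind fires. A hedged sketch of such a configuration (the actual URL in PrometheusRemoteWriteBehind.yaml is not shown in the test):
// Hedged sketch — the real PrometheusRemoteWriteBehind.yaml may use a different URL.
const unreachableRemoteWriteCM = `
apiVersion: v1
kind: ConfigMap
metadata:
  name: cluster-monitoring-config
  namespace: openshift-monitoring
data:
  config.yaml: |
    prometheusK8s:
      remoteWrite:
      - url: https://no-such-host.invalid/api/write
`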
test case
|
openshift/openshift-tests-private
|
38f27e13-4430-4764-9a66-8f639576c7d6
|
Author:tagao-Medium-76282-monitoring-plugin should reload cert/key files dynamically [Serial]
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Medium-76282-monitoring-plugin should reload cert/key files dynamically [Serial]", func() {
exutil.By("delete uwm-config/cm-config at the end of the case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("check openshift-monitoring/monitoring-plugin-cert secret exist")
//% oc -n openshift-monitoring get secret monitoring-plugin-cert -ojsonpath='{.data}'
cmd := "-ojsonpath={.data}"
checkYamlconfig(oc, "openshift-monitoring", "secret", "monitoring-plugin-cert", cmd, `tls.crt`, true)
checkYamlconfig(oc, "openshift-monitoring", "secret", "monitoring-plugin-cert", cmd, `tls.key`, true)
secretBefore, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", "monitoring-plugin-cert", "-ojsonpath={.data}", "-n", "openshift-monitoring").Output()
exutil.By("delete openshift-monitoring/monitoring-plugin-cert secret")
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("secret", "monitoring-plugin-cert", "-n", "openshift-monitoring").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check the secret re-created")
checkYamlconfig(oc, "openshift-monitoring", "secret", "monitoring-plugin-cert", cmd, `tls.crt`, true)
checkYamlconfig(oc, "openshift-monitoring", "secret", "monitoring-plugin-cert", cmd, `tls.key`, true)
secretAfter, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", "monitoring-plugin-cert", "-ojsonpath={.data}", "-n", "openshift-monitoring").Output()
exutil.By("check the secret have a new hash")
if strings.Compare(secretBefore, secretAfter) == 0 {
e2e.Failf("secret not changed!")
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
a5408e0d-955f-4307-9bff-c4fd60640929
|
Author:tagao-Medium-73291-Graduate MetricsServer FeatureGate to GA [Serial]
|
['"context"', '"path/filepath"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Medium-73291-Graduate MetricsServer FeatureGate to GA [Serial]", func() {
var (
metrics_server_test = filepath.Join(monitoringBaseDir, "metrics_server_test.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of the case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("check metrics-server pods are ready")
getReadyPodsWithLabels(oc, "openshift-monitoring", "app.kubernetes.io/component=metrics-server")
exutil.By("label master node with metrics-server label")
nodeList, err := getNodesWithLabel(oc, "node-role.kubernetes.io/master")
o.Expect(err).NotTo(o.HaveOccurred())
for _, node := range nodeList {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", node, "metricsserver-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", node, "metricsserver=deploy").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("schedule metrics-server pods to master node")
createResourceFromYaml(oc, "openshift-monitoring", metrics_server_test)
podCheck := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 300*time.Second, true, func(context.Context) (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-monitoring", "-l", "app.kubernetes.io/component=metrics-server").Output()
if err != nil || strings.Contains(output, "Terminating") {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(podCheck, "metrics-server pods did not restart!")
exutil.By("confirm metrics-server pods scheduled to master nodes, this step may take few mins")
getReadyPodsWithLabels(oc, "openshift-monitoring", "app.kubernetes.io/component=metrics-server")
podNames, err := getAllRunningPodsWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/component=metrics-server")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range podNames {
nodeName, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", pod, "-ojsonpath={.spec.nodeName}", "-n", "openshift-monitoring").Output()
nodeCheck, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeName, "-ojsonpath={.metadata.labels}").Output()
o.Expect(strings.Contains(string(nodeCheck), "node-role.kubernetes.io/master")).Should(o.BeTrue())
}
exutil.By("check config applied")
for _, pod := range podNames {
// % oc -n openshift-monitoring get pod metrics-server-7778dbf79b-8frpq -o jsonpath='{.spec.nodeSelector}' | jq
cmd := "-ojsonpath={.spec.nodeSelector}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, `"metricsserver":"deploy"`, true)
// % oc -n openshift-monitoring get pod metrics-server-7778dbf79b-8frpq -o jsonpath='{.spec.topologySpreadConstraints}' | jq
cmd = "-ojsonpath={.spec.topologySpreadConstraints}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, `"app.kubernetes.io/name":"metrics-server"`, true)
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, `"maxSkew":2`, true)
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, `"topologyKey":"metricsserver"`, true)
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, `"whenUnsatisfiable":"DoNotSchedule"`, true)
// % oc get pod -n openshift-monitoring metrics-server-c8cbfd6ff-pnk2z -o go-template='{{range.spec.containers}}{{"Container Name: "}}{{.name}}{{"\r\nresources: "}}{{.resources}}{{"\n"}}{{end}}'
cmd = `-ogo-template={{range.spec.containers}}{{"Container Name: "}}{{.name}}{{"\r\nresources: "}}{{.resources}}{{"\n"}}{{end}}`
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, `resources: map[limits:map[cpu:50m memory:500Mi] requests:map[cpu:10m memory:50Mi]]`, true)
}
})
| |||||
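Note on case 73291 above: the assertions pin down most of what metrics_server_test.yaml must set; the values below mirror the test's checks, while the exact field layout of the real file is an assumption.
// Hedged reconstruction — values come from the test's assertions, the shipped file may differ.
const metricsServerCM = `
apiVersion: v1
kind: ConfigMap
metadata:
  name: cluster-monitoring-config
  namespace: openshift-monitoring
data:
  config.yaml: |
    metricsServer:
      nodeSelector:
        metricsserver: deploy
      topologySpreadConstraints:
      - maxSkew: 2
        topologyKey: metricsserver
        whenUnsatisfiable: DoNotSchedule
        labelSelector:
          matchLabels:
            app.kubernetes.io/name: metrics-server
      resources:
        requests:
          cpu: 10m
          memory: 50Mi
        limits:
          cpu: 50m
          memory: 500Mi
`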
test case
|
openshift/openshift-tests-private
|
ae5c6791-f520-433e-9c96-9e327695312c
|
Author:tagao-Medium-72776-Enable audit logging to Metrics Server - invalid value [Serial]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Medium-72776-Enable audit logging to Metrics Server - invalid value [Serial]", func() {
var (
invalid_value_audit_profile = filepath.Join(monitoringBaseDir, "invalid_value_audit_profile.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of the case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("check default audit level is Metadata")
//% oc -n openshift-monitoring get deploy metrics-server -ojsonpath='{.spec.template.spec.containers[?(@.name=="metrics-server")].args}' | jq
cmd := `-ojsonpath={.spec.template.spec.containers[?(@.name=="metrics-server")].args}`
checkYamlconfig(oc, "openshift-monitoring", "deploy", "metrics-server", cmd, `"--audit-policy-file=/etc/audit/metadata-profile.yaml"`, true)
exutil.By("set invalid value for audit profile")
createResourceFromYaml(oc, "openshift-monitoring", invalid_value_audit_profile)
exutil.By("check failed log in CMO")
checkLogWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/name=cluster-monitoring-operator", "cluster-monitoring-operator", `adapter audit profile: metadata`, true)
})
| |||||
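Note on case 72776 above: the CMO log line "adapter audit profile: metadata" suggests the invalid value is simply a lowercase profile name (valid profiles are None, Metadata, Request and RequestResponse). A hedged sketch of invalid_value_audit_profile.yaml:
// Hedged sketch — the real file may use a different invalid value.
const invalidAuditProfileCM = `
apiVersion: v1
kind: ConfigMap
metadata:
  name: cluster-monitoring-config
  namespace: openshift-monitoring
data:
  config.yaml: |
    metricsServer:
      audit:
        profile: metadata
`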
test case
|
openshift/openshift-tests-private
|
ec6f0aae-dd34-4ca2-a32c-15dca6b62444
|
Author:tagao-Medium-72707-Enable audit logging to Metrics Server [Serial]
|
['"os/exec"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Medium-72707-Enable audit logging to Metrics Server [Serial]", func() {
var (
valid_value_audit_profile = filepath.Join(monitoringBaseDir, "valid_value_audit_profile.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of the case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("check audit file path")
//% oc -n openshift-monitoring get deploy metrics-server -ojsonpath='{.spec.template.spec.containers[?(@.name=="metrics-server")].args}' | jq
cmd := `-ojsonpath={.spec.template.spec.containers[?(@.name=="metrics-server")].args}`
checkYamlconfig(oc, "openshift-monitoring", "deploy", "metrics-server", cmd, `"--audit-policy-file=/etc/audit/metadata-profile.yaml"`, true)
exutil.By("check the audit log")
//% oc -n openshift-monitoring exec -c metrics-server metrics-server-777f5464ff-5fdvh -- cat /var/log/metrics-server/audit.log
getReadyPodsWithLabels(oc, "openshift-monitoring", "app.kubernetes.io/component=metrics-server")
podNames, err := getAllRunningPodsWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/component=metrics-server")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range podNames {
cmd := "cat /var/log/metrics-server/audit.log"
checkConfigInsidePod(oc, "openshift-monitoring", "metrics-server", pod, cmd, `"level":"Metadata"`, true)
}
exutil.By("set audit profile as Request")
createResourceFromYaml(oc, "openshift-monitoring", valid_value_audit_profile)
exutil.By("check the deploy config applied")
//oc -n openshift-monitoring get deploy metrics-server -ojsonpath='{.spec.template.spec.containers[?(@.name=="metrics-server")].args}' | jq
cmd = `-ojsonpath={.spec.template.spec.containers[?(@.name=="metrics-server")].args}`
checkYamlconfig(oc, "openshift-monitoring", "deploy", "metrics-server", cmd, `"--audit-policy-file=/etc/audit/request-profile.yaml"`, true)
exutil.By("check the policy reflect into pod")
getReadyPodsWithLabels(oc, "openshift-monitoring", "app.kubernetes.io/component=metrics-server")
podNames, err = getAllRunningPodsWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/component=metrics-server")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range podNames {
//oc -n openshift-monitoring exec -c metrics-server metrics-server-85db9c79c8-sljdb -- cat /etc/audit/request-profile.yaml
cmd := "cat /etc/audit/request-profile.yaml"
checkConfigInsidePod(oc, "openshift-monitoring", "metrics-server", pod, cmd, `"name": "Request"`, true)
checkConfigInsidePod(oc, "openshift-monitoring", "metrics-server", pod, cmd, `"level": "Request"`, true)
//oc -n openshift-monitoring exec -c metrics-server metrics-server-85db9c79c8-sljdb -- cat /var/log/metrics-server/audit.log
cmd = "cat /var/log/metrics-server/audit.log"
checkConfigInsidePod(oc, "openshift-monitoring", "metrics-server", pod, cmd, `level":"Request"`, true)
}
})
| |||||
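Note on case 72707 above: the Request profile the test verifies implies a config like the hedged sketch below (assumed shape of valid_value_audit_profile.yaml):
// Hedged sketch — assumed shape of valid_value_audit_profile.yaml.
const requestAuditProfileCM = `
apiVersion: v1
kind: ConfigMap
metadata:
  name: cluster-monitoring-config
  namespace: openshift-monitoring
data:
  config.yaml: |
    metricsServer:
      audit:
        profile: Request
`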
test case
|
openshift/openshift-tests-private
|
10aca514-bed2-45f0-82dd-24af3acf1fc4
|
Author:hongyli-Critical-44032-Restore cluster monitoring stack default configuration [Serial]
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:hongyli-Critical-44032-Restore cluster monitoring stack default configuration [Serial]", func() {
exutil.By("Delete config map cluster-monitoring-config")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("Delete config map user-workload-monitoring-config")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
exutil.By("Delete alertmanager under openshift-user-workload-monitoring")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("alertmanager", "test-alertmanager", "-n", "openshift-user-workload-monitoring", "--ignore-not-found").Execute()
})
| ||||||
test case
|
openshift/openshift-tests-private
|
f6a65cee-1931-45ad-b5c0-6ca54dcc5652
|
Author:hongyli-High-49745-High-50519-Retention for UWM Prometheus and thanos ruler
|
['"time"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:hongyli-High-49745-High-50519-Retention for UWM Prometheus and thanos ruler", func() {
exutil.By("Check retention size of prometheus user workload")
checkRetention(oc, "openshift-user-workload-monitoring", "prometheus-user-workload", "storage.tsdb.retention.size=5GiB", uwmLoadTime)
exutil.By("Check retention of prometheus user workload")
checkRetention(oc, "openshift-user-workload-monitoring", "prometheus-user-workload", "storage.tsdb.retention.time=15d", 20)
exutil.By("Check retention of thanos ruler")
checkRetention(oc, "openshift-user-workload-monitoring", "thanos-ruler-user-workload", "retention=15d", uwmLoadTime)
})
| |||||
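Note on cases 49745/50519 above: the retention values checked in the pods imply a user-workload config like the hedged sketch below (reconstructed from the assertions, not copied from the repo):
// Hedged reconstruction of the UWM retention settings the test expects.
const uwmRetentionCM = `
apiVersion: v1
kind: ConfigMap
metadata:
  name: user-workload-monitoring-config
  namespace: openshift-user-workload-monitoring
data:
  config.yaml: |
    prometheus:
      retention: 15d
      retentionSize: 5GiB
    thanosRuler:
      retention: 15d
`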
test case
|
openshift/openshift-tests-private
|
f9d101ad-dbbc-4c0f-89d0-690ccb8f4d4c
|
Author:juzhao-LEVEL0-Medium-42956-Should not have PrometheusNotIngestingSamples alert if enabled user workload monitoring only
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:juzhao-LEVEL0-Medium-42956-Should not have PrometheusNotIngestingSamples alert if enabled user workload monitoring only", func() {
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("check alerts, Should not have PrometheusNotIngestingSamples alert fired")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertname="PrometheusNotIngestingSamples"}'`, token, `"result":[]`, uwmLoadTime)
})
| ||||||
test case
|
openshift/openshift-tests-private
|
4e05a0f8-3da4-4277-8e89-dd6676340bec
|
Author:juzhao-Medium-70998-PrometheusRestrictedConfig supports enabling sendExemplars
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:juzhao-Medium-70998-PrometheusRestrictedConfig supports enabling sendExemplars", func() {
exutil.By("check exemplar-storage is enabled")
cmd := "-ojsonpath={.spec.enableFeatures[*]}"
checkYamlconfig(oc, "openshift-user-workload-monitoring", "prometheus", "user-workload", cmd, "exemplar-storage", true)
//check settings in UWM prometheus pods
podNames, err := exutil.GetAllPodsWithLabel(oc, "openshift-user-workload-monitoring", "app.kubernetes.io/name=prometheus")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range podNames {
cmd = "-ojsonpath={.spec.containers[?(@.name==\"prometheus\")].args}"
checkYamlconfig(oc, "openshift-user-workload-monitoring", "pod", pod, cmd, `--enable-feature=`, true)
checkYamlconfig(oc, "openshift-user-workload-monitoring", "pod", pod, cmd, `exemplar-storage`, true)
}
exutil.By("check sendExemplars is true in UWM prometheus CRD")
cmd = "-ojsonpath={.spec.remoteWrite}"
checkYamlconfig(oc, "openshift-user-workload-monitoring", "prometheus", "user-workload", cmd, `"sendExemplars":true`, true)
})
| ||||||
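Note on case 70998 above: the sendExemplars flag verified on the user-workload Prometheus CRD implies a remote write entry like the hedged sketch below (the URL is an assumption); the test further implies CMO translates this into the exemplar-storage feature flag.
// Hedged sketch — the remote write URL is an assumption; sendExemplars is what the test verifies.
const uwmSendExemplarsCM = `
apiVersion: v1
kind: ConfigMap
metadata:
  name: user-workload-monitoring-config
  namespace: openshift-user-workload-monitoring
data:
  config.yaml: |
    prometheus:
      remoteWrite:
      - url: https://test.remotewrite.com/api/write
        sendExemplars: true
`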
test case
|
openshift/openshift-tests-private
|
27e2831d-5402-42b9-a4a0-baf1e76b8920
|
Author:tagao-LEVEL0-Medium-46301-Allow OpenShift users to configure query log file for Prometheus
|
['"os/exec"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-LEVEL0-Medium-46301-Allow OpenShift users to configure query log file for Prometheus", func() {
exutil.By("confirm prometheus-k8s-0 pod is ready for check")
MONpod, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-monitoring").Output()
e2e.Logf("the MON pods condition: %s", MONpod)
assertPodToBeReady(oc, "prometheus-k8s-0", "openshift-monitoring")
ensurePodRemainsReady(oc, "prometheus-k8s-0", "openshift-monitoring", 30*time.Second, 5*time.Second)
cmd := "ls /tmp/promethues_query.log"
checkConfigInsidePod(oc, "openshift-monitoring", "prometheus", "prometheus-k8s-0", cmd, "promethues_query.log", true)
exutil.By("check query log file for prometheus in openshift-monitoring")
queryErr := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", "openshift-monitoring", "-c", "prometheus", "prometheus-k8s-0", "--", "curl", "http://localhost:9090/api/v1/query?query=prometheus_build_info").Execute()
o.Expect(queryErr).NotTo(o.HaveOccurred())
cmd = "cat /tmp/promethues_query.log | grep prometheus_build_info"
checkConfigInsidePod(oc, "openshift-monitoring", "prometheus", "prometheus-k8s-0", cmd, "prometheus_build_info", true)
exutil.By("confirm prometheus-user-workload-0 pod is ready for check")
UWMpod, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-user-workload-monitoring").Output()
e2e.Logf("the UWM pods condition: %s", UWMpod)
assertPodToBeReady(oc, "prometheus-user-workload-0", "openshift-user-workload-monitoring")
ensurePodRemainsReady(oc, "prometheus-user-workload-0", "openshift-user-workload-monitoring", 60*time.Second, 5*time.Second)
cmd = "ls /tmp/uwm_query.log"
checkConfigInsidePod(oc, "openshift-user-workload-monitoring", "prometheus", "prometheus-user-workload-0", cmd, "uwm_query.log", true)
exutil.By("check query log file for prometheus in openshift-user-workload-monitoring")
queryErr = oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", "openshift-user-workload-monitoring", "-c", "prometheus", "prometheus-user-workload-0", "--", "curl", "http://localhost:9090/api/v1/query?query=up").Execute()
o.Expect(queryErr).NotTo(o.HaveOccurred())
cmd = "cat /tmp/uwm_query.log | grep up"
checkConfigInsidePod(oc, "openshift-user-workload-monitoring", "prometheus", "prometheus-user-workload-0", cmd, "up", true)
})
| |||||
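Note on case 46301 above: the log paths read back inside the pods imply queryLogFile settings like the hedged sketch below; the paths (including the existing spelling of promethues_query.log) match what the test checks, while the rest of each ConfigMap is assumed.
// Hedged sketch of the two query log settings, one fragment per ConfigMap.
const queryLogConfigFragments = `
# cluster-monitoring-config (openshift-monitoring)
prometheusK8s:
  queryLogFile: /tmp/promethues_query.log
---
# user-workload-monitoring-config (openshift-user-workload-monitoring)
prometheus:
  queryLogFile: /tmp/uwm_query.log
`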
test case
|
openshift/openshift-tests-private
|
ad174e9e-90a6-40d1-8325-c9deb4c9500c
|
Author:tagao-Medium-50008-Expose sigv4 settings for remote write in the CMO configuration [Serial]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Medium-50008-Expose sigv4 settings for remote write in the CMO configuration [Serial]", func() {
var (
sigv4ClusterCM = filepath.Join(monitoringBaseDir, "sigv4-cluster-monitoring-cm.yaml")
sigv4UwmCM = filepath.Join(monitoringBaseDir, "sigv4-uwm-monitoring-cm.yaml")
sigv4Secret = filepath.Join(monitoringBaseDir, "sigv4-secret.yaml")
sigv4SecretUWM = filepath.Join(monitoringBaseDir, "sigv4-secret-uwm.yaml")
)
exutil.By("delete secret/cm at the end of case")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("secret", "sigv4-credentials-uwm", "-n", "openshift-user-workload-monitoring").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("secret", "sigv4-credentials", "-n", "openshift-monitoring").Execute()
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("Create sigv4 secret under openshift-monitoring")
createResourceFromYaml(oc, "openshift-monitoring", sigv4Secret)
exutil.By("Configure remote write sigv4 and enable user workload monitoring")
createResourceFromYaml(oc, "openshift-monitoring", sigv4ClusterCM)
exutil.By("confirm prometheus-k8s-0 pod is ready for check")
pod, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-monitoring", "-l", "app.kubernetes.io/name=prometheus").Output()
e2e.Logf("the prometheus pods condition: %s", pod)
exutil.AssertPodToBeReady(oc, "prometheus-k8s-0", "openshift-monitoring")
exutil.By("Check sig4 config under openshift-monitoring")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "url: https://authorization.remotewrite.com/api/write")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "sigv4:")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "region: us-central1")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "access_key: basic_user")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "secret_key: basic_pass")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "profile: SomeProfile")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "role_arn: SomeRoleArn")
exutil.By("Create sigv4 secret under openshift-user-workload-monitoring")
createResourceFromYaml(oc, "openshift-user-workload-monitoring", sigv4SecretUWM)
exutil.By("Configure remote write sigv4 setting for user workload monitoring")
createResourceFromYaml(oc, "openshift-user-workload-monitoring", sigv4UwmCM)
exutil.By("confirm prometheus-user-workload-0 pod is ready for check")
pod, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-user-workload-monitoring", "-l", "app.kubernetes.io/name=prometheus").Output()
e2e.Logf("the prometheus pods condition: %s", pod)
exutil.AssertPodToBeReady(oc, "prometheus-user-workload-0", "openshift-user-workload-monitoring")
exutil.By("Check sig4 config under openshift-user-workload-monitoring")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "url: https://authorization.remotewrite.com/api/write")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "sigv4:")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "region: us-east2")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "access_key: basic_user_uwm")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "secret_key: basic_pass_uwm")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "profile: umw_Profile")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "role_arn: umw_RoleArn")
})
| |||||
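Note on case 50008 above: the rendered sigv4 block the test greps for implies a remote write entry like the hedged sketch below; the URL, region, profile, roleArn and secret name come from the values checked, while the key names inside the secret are assumptions.
// Hedged sketch — the key names under accessKey/secretKey are assumptions.
const sigv4RemoteWriteFragment = `
prometheusK8s:
  remoteWrite:
  - url: https://authorization.remotewrite.com/api/write
    sigv4:
      region: us-central1
      accessKey:
        name: sigv4-credentials
        key: AccessKeyID
      secretKey:
        name: sigv4-credentials
        key: SecretAccessKey
      profile: SomeProfile
      roleArn: SomeRoleArn
`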
test case
|
openshift/openshift-tests-private
|
68bdfe31-f84e-41db-aeb2-87cc7e554752
|
Author:tagao-Medium-49694-Expose OAuth2 settings for remote write in the CMO configuration [Serial]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Medium-49694-Expose OAuth2 settings for remote write in the CMO configuration [Serial]", func() {
var (
oauth2ClusterCM = filepath.Join(monitoringBaseDir, "oauth2-cluster-monitoring-cm.yaml")
oauth2UwmCM = filepath.Join(monitoringBaseDir, "oauth2-uwm-monitoring-cm.yaml")
oauth2Secret = filepath.Join(monitoringBaseDir, "oauth2-secret.yaml")
oauth2SecretUWM = filepath.Join(monitoringBaseDir, "oauth2-secret-uwm.yaml")
)
exutil.By("delete secret/cm at the end of case")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("secret", "oauth2-credentials", "-n", "openshift-user-workload-monitoring").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("secret", "oauth2-credentials", "-n", "openshift-monitoring").Execute()
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("Create oauth2 secret under openshift-monitoring")
createResourceFromYaml(oc, "openshift-monitoring", oauth2Secret)
exutil.By("Configure remote write oauth2 and enable user workload monitoring")
createResourceFromYaml(oc, "openshift-monitoring", oauth2ClusterCM)
exutil.By("Check oauth2 config under openshift-monitoring")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "url: https://test.remotewrite.com/api/write")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "remote_timeout: 30s")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "client_id: basic_user")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "client_secret: basic_pass")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "token_url: https://example.com/oauth2/token")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "scope1")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "scope2")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "param1: value1")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "param2: value2")
exutil.By("Create oauth2 secret under openshift-user-workload-monitoring")
createResourceFromYaml(oc, "openshift-user-workload-monitoring", oauth2SecretUWM)
exutil.By("Configure remote write oauth2 setting for user workload monitoring")
createResourceFromYaml(oc, "openshift-user-workload-monitoring", oauth2UwmCM)
exutil.By("Check oauth2 config under openshift-user-workload-monitoring")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "url: https://test.remotewrite.com/api/write")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "remote_timeout: 30s")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "client_id: basic_user")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "client_secret: basic_pass")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "token_url: https://example.com/oauth2/token")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "scope3")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "scope4")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "param3: value3")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "param4: value4")
})
| |||||
test case
|
openshift/openshift-tests-private
|
4885fb6e-bb23-4e38-89a1-d1a956d25fe1
|
Author:tagao-Medium-47519-Platform prometheus operator should reconcile AlertmanagerConfig resources from user namespaces [Serial]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Medium-47519-Platform prometheus operator should reconcile AlertmanagerConfig resources from user namespaces [Serial]", func() {
var (
enableAltmgrConfig = filepath.Join(monitoringBaseDir, "enableUserAlertmanagerConfig.yaml")
wechatConfig = filepath.Join(monitoringBaseDir, "exampleAlertConfigAndSecret.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("enable alert manager config")
createResourceFromYaml(oc, "openshift-monitoring", enableAltmgrConfig)
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
exutil.By("check the initial alertmanager configuration")
checkAlertmanagerConfig(oc, "openshift-monitoring", "alertmanager-main-0", "alertname = Watchdog", true)
exutil.By("create&check alertmanagerconfig under openshift-monitoring")
createResourceFromYaml(oc, "openshift-monitoring", wechatConfig)
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("alertmanagerconfig/config-example", "secret/wechat-config", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("config-example"))
o.Expect(output).To(o.ContainSubstring("wechat-config"))
exutil.By("check if the new created AlertmanagerConfig is reconciled in the Alertmanager configuration (should not)")
checkAlertmanagerConfig(oc, "openshift-monitoring", "alertmanager-main-0", "wechat", false)
exutil.By("delete the alertmanagerconfig/secret created under openshift-monitoring")
oc.AsAdmin().WithoutNamespace().Run("delete").Args("alertmanagerconfig/config-example", "secret/wechat-config", "-n", "openshift-monitoring").Execute()
exutil.By("create one new project, label the namespace and create the same AlertmanagerConfig")
oc.SetupProject()
ns := oc.Namespace()
oc.AsAdmin().WithoutNamespace().Run("label").Args("namespace", ns, "openshift.io/user-monitoring=false").Execute()
exutil.By("create&check alertmanagerconfig under the namespace")
createResourceFromYaml(oc, ns, wechatConfig)
output2, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("alertmanagerconfig/config-example", "secret/wechat-config", "-n", ns).Output()
o.Expect(output2).To(o.ContainSubstring("config-example"))
o.Expect(output2).To(o.ContainSubstring("wechat-config"))
exutil.By("check if the new created AlertmanagerConfig is reconciled in the Alertmanager configuration (should not)")
checkAlertmanagerConfig(oc, "openshift-monitoring", "alertmanager-main-0", "wechat", false)
exutil.By("update the label to true")
oc.AsAdmin().WithoutNamespace().Run("label").Args("namespace", ns, "openshift.io/user-monitoring=true", "--overwrite").Execute()
exutil.By("check if the new created AlertmanagerConfig is reconciled in the Alertmanager configuration")
checkAlertmanagerConfig(oc, "openshift-monitoring", "alertmanager-main-0", "wechat", true)
exutil.By("set enableUserAlertmanagerConfig to false")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("cm", "cluster-monitoring-config", "-p", `{"data": {"config.yaml": "alertmanagerMain:\n enableUserAlertmanagerConfig: false\n"}}`, "--type=merge", "-n", "openshift-monitoring").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("the AlertmanagerConfig from user project is removed")
checkAlertmanagerConfig(oc, "openshift-monitoring", "alertmanager-main-0", "wechat", false)
})
| |||||
test case
|
openshift/openshift-tests-private
|
1321213b-e340-4d1a-bf27-4162bc84ddcf
|
Author:tagao-Medium-49404-Medium-49176-Expose Authorization settings for remote write in the CMO configuration, Add the relabel config to all user-supplied remote_write configurations [Serial]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Medium-49404-Medium-49176-Expose Authorization settings for remote write in the CMO configuration, Add the relabel config to all user-supplied remote_write configurations [Serial]", func() {
var (
authClusterCM = filepath.Join(monitoringBaseDir, "auth-cluster-monitoring-cm.yaml")
authUwmCM = filepath.Join(monitoringBaseDir, "auth-uwm-monitoring-cm.yaml")
authSecret = filepath.Join(monitoringBaseDir, "auth-secret.yaml")
authSecretUWM = filepath.Join(monitoringBaseDir, "auth-secret-uwm.yaml")
)
exutil.By("delete secret/cm at the end of case")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("secret", "rw-auth", "-n", "openshift-user-workload-monitoring").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("secret", "rw-auth", "-n", "openshift-monitoring").Execute()
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("Create auth secret under openshift-monitoring")
createResourceFromYaml(oc, "openshift-monitoring", authSecret)
exutil.By("Configure remote write auth and enable user workload monitoring")
createResourceFromYaml(oc, "openshift-monitoring", authClusterCM)
exutil.By("confirm prometheus-k8s-0 pod is ready for check")
pod, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-monitoring", "-l", "app.kubernetes.io/name=prometheus").Output()
e2e.Logf("the prometheus pods condition: %s", pod)
exutil.AssertPodToBeReady(oc, "prometheus-k8s-0", "openshift-monitoring")
exutil.By("Check auth config under openshift-monitoring")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "url: https://remote-write.endpoint")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "target_label: __tmp_openshift_cluster_id__")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "url: https://basicAuth.remotewrite.com/api/write")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "username: basic_user")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "password: basic_pass")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "url: https://authorization.remotewrite.com/api/write")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "__tmp_openshift_cluster_id__")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "target_label: cluster_id")
exutil.By("Create auth secret under openshift-user-workload-monitoring")
createResourceFromYaml(oc, "openshift-user-workload-monitoring", authSecretUWM)
exutil.By("Configure remote write auth setting for user workload monitoring")
createResourceFromYaml(oc, "openshift-user-workload-monitoring", authUwmCM)
exutil.By("confirm prometheus-user-workload-0 pod is ready for check")
pod, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-user-workload-monitoring", "-l", "app.kubernetes.io/name=prometheus").Output()
e2e.Logf("the prometheus pods condition: %s", pod)
exutil.AssertPodToBeReady(oc, "prometheus-user-workload-0", "openshift-user-workload-monitoring")
exutil.By("Check auth config under openshift-user-workload-monitoring")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "url: https://remote-write.endpoint")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "target_label: __tmp_openshift_cluster_id__")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "url: https://basicAuth.remotewrite.com/api/write")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "username: basic_user")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "password: basic_pass")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "url: https://bearerTokenFile.remotewrite.com/api/write")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "url: https://authorization.remotewrite.com/api/write")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "__tmp_openshift_cluster_id__")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "target_label: cluster_id_1")
})
| |||||
test case
|
openshift/openshift-tests-private
|
dcec6377-c148-47b5-8d85-92a193f78896
|
Author:tagao-Low-43037-Should not have error for oc adm inspect clusteroperator monitoring command
|
['"os/exec"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Low-43037-Should not have error for oc adm inspect clusteroperator monitoring command", func() {
exutil.By("delete must-gather file at the end of case")
defer exec.Command("bash", "-c", "rm -rf /tmp/must-gather-43037").Output()
exutil.By("oc adm inspect clusteroperator monitoring")
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
output, _ := oc.AsAdmin().WithoutNamespace().Run("adm").Args("inspect", "clusteroperator", "monitoring", "--dest-dir=/tmp/must-gather-43037").Output()
o.Expect(output).NotTo(o.ContainSubstring("error"))
})
| |||||
test case
|
openshift/openshift-tests-private
|
7c70d2ca-b6ea-4186-b503-36cf8ebdc3a0
|
Author:tagao-Medium-32224-Separate user workload configuration [Serial]
|
['"path/filepath"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Medium-32224-Separate user workload configuration [Serial]", func() {
var (
separateUwmConf = filepath.Join(monitoringBaseDir, "separate-uwm-config.yaml")
)
exutil.By("delete uwm-config/cm-config and bound pvc at the end of a serial case")
defer func() {
PvcNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pvc", "-ojsonpath={.items[*].metadata.name}", "-l", "app.kubernetes.io/instance=user-workload", "-n", "openshift-user-workload-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred())
for _, pvc := range strings.Fields(PvcNames) {
oc.AsAdmin().WithoutNamespace().Run("delete").Args("pvc", pvc, "-n", "openshift-user-workload-monitoring").Execute()
}
}()
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("this case should execute on cluster which have storage class")
checkSc, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("sc").Output()
if checkSc == "{}" || !strings.Contains(checkSc, "default") {
g.Skip("This case should execute on cluster which have default storage class!")
}
exutil.By("get master node names with label")
NodeNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "-l", "node-role.kubernetes.io/master", "-ojsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeNameList := strings.Fields(NodeNames)
exutil.By("add labels to master nodes, and delete them at the end of case")
for _, name := range nodeNameList {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", name, "uwm-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", name, "uwm=deploy").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("create the separate user workload configuration")
createResourceFromYaml(oc, "openshift-user-workload-monitoring", separateUwmConf)
exutil.By("check remoteWrite metrics")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=prometheus_remote_storage_shards'`, token, `"url":"http://localhost:1234/receive"`, 3*uwmLoadTime)
exutil.By("check prometheus-user-workload pods are bound to PVCs, check cpu and memory")
PodNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-ojsonpath={.items[*].metadata.name}", "-l", "app.kubernetes.io/name=prometheus", "-n", "openshift-user-workload-monitoring").Output()
PodNameList := strings.Fields(PodNames)
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range PodNameList {
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", pod, "-ojsonpath={.spec.volumes[]}", "-n", "openshift-user-workload-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("uwm-prometheus"))
output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", pod, `-ojsonpath={.spec.containers[?(@.name=="prometheus")].resources.requests}`, "-n", "openshift-user-workload-monitoring").Output()
o.Expect(output).To(o.ContainSubstring(`"cpu":"200m","memory":"1Gi"`))
}
exutil.By("check thanos-ruler-user-workload pods are bound to PVCs, check cpu and memory")
PodNames, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-ojsonpath={.items[*].metadata.name}", "-l", "app.kubernetes.io/name=thanos-ruler", "-n", "openshift-user-workload-monitoring").Output()
PodNameList = strings.Fields(PodNames)
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range PodNameList {
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", pod, "-ojsonpath={.spec.volumes[]}", "-n", "openshift-user-workload-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("thanosruler"))
output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", pod, `-ojsonpath={.spec.containers[?(@.name=="thanos-ruler")].resources.requests}`, "-n", "openshift-user-workload-monitoring").Output()
o.Expect(output).To(o.ContainSubstring(`"cpu":"20m","memory":"50Mi"`))
}
exutil.By("toleration settings check")
PodNames, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-ojsonpath={.items[*].metadata.name}", "-n", "openshift-user-workload-monitoring").Output()
PodNameList = strings.Fields(PodNames)
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range PodNameList {
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", pod, "-ojsonpath={.spec.tolerations}", "-n", "openshift-user-workload-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("node-role.kubernetes.io/master"))
o.Expect(output).To(o.ContainSubstring(`"operator":"Exists"`))
}
exutil.By("prometheus.enforcedSampleLimit check")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("prometheus", "user-workload", "-ojsonpath={.spec.enforcedSampleLimit}", "-n", "openshift-user-workload-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("2"))
exutil.By("prometheus.retention check")
output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("prometheus", "user-workload", "-ojsonpath={.spec.retention}", "-n", "openshift-user-workload-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("48h"))
})
| |||||
test case
|
openshift/openshift-tests-private
|
d4144f6b-7627-435e-af60-0d578b03b986
|
Author:tagao-LEVEL0-Medium-50954-Allow the deployment of a dedicated UWM Alertmanager [Serial]
|
['"context"', '"path/filepath"', '"strings"', '"time"', 'g "github.com/onsi/ginkgo/v2"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-LEVEL0-Medium-50954-Allow the deployment of a dedicated UWM Alertmanager [Serial]", func() {
var (
dedicatedUWMalertmanager = filepath.Join(monitoringBaseDir, "dedicated-uwm-alertmanager.yaml")
exampleAlert = filepath.Join(monitoringBaseDir, "example-alert-rule.yaml")
AlertmanagerConfig = filepath.Join(monitoringBaseDir, "exampleAlertConfigAndSecret.yaml")
)
exutil.By("delete uwm-config/cm-config and bound pvc at the end of a serial case")
defer func() {
PvcNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pvc", "-ojsonpath={.items[*].metadata.name}", "-l", "alertmanager=user-workload", "-n", "openshift-user-workload-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred())
for _, pvc := range strings.Fields(PvcNames) {
oc.AsAdmin().WithoutNamespace().Run("delete").Args("pvc", pvc, "-n", "openshift-user-workload-monitoring").Execute()
}
}()
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("this case should execute on cluster which have storage class")
checkSc, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("sc").Output()
if checkSc == "{}" || !strings.Contains(checkSc, "default") {
g.Skip("This case should execute on cluster which have default storage class!")
}
exutil.By("get master node names with label")
NodeNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "-l", "node-role.kubernetes.io/master", "-ojsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeNameList := strings.Fields(NodeNames)
exutil.By("add labels to master nodes, and delete them at the end of case")
for _, name := range nodeNameList {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", name, "uwm-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", name, "uwm=alertmanager").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("create the dedicated UWM Alertmanager configuration")
createResourceFromYaml(oc, "openshift-user-workload-monitoring", dedicatedUWMalertmanager)
exutil.By("deploy prometheusrule and alertmanagerconfig to user project")
oc.SetupProject()
ns := oc.Namespace()
createResourceFromYaml(oc, ns, exampleAlert)
createResourceFromYaml(oc, ns, AlertmanagerConfig)
exutil.By("check all pods are created")
exutil.AssertAllPodsToBeReady(oc, "openshift-user-workload-monitoring")
exutil.By("confirm thanos-ruler is ready")
exutil.AssertPodToBeReady(oc, "thanos-ruler-user-workload-0", "openshift-user-workload-monitoring")
thanosPod, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-l", "app.kubernetes.io/name=thanos-ruler", "-n", "openshift-user-workload-monitoring").Output()
e2e.Logf("thanos-ruler pods: \n%v", thanosPod)
thanosSaErr := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 60*time.Second, true, func(context.Context) (bool, error) {
thanosSa, err := oc.AsAdmin().Run("get").Args("sa", "thanos-ruler", "-n", "openshift-user-workload-monitoring").Output()
if err != nil || strings.Contains(thanosSa, "not found") {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(thanosSaErr, "sa not created")
exutil.By("check the alerts could be found in alertmanager under openshift-user-workload-monitoring project")
token := getSAToken(oc, "thanos-ruler", "openshift-user-workload-monitoring")
checkMetric(oc, `https://alertmanager-user-workload.openshift-user-workload-monitoring.svc:9095/api/v2/alerts`, token, "TestAlert1", 3*uwmLoadTime)
exutil.By("check the alerts could not be found in openshift-monitoring project")
//same as: checkMetric(oc, `https://alertmanager-main.openshift-monitoring.svc:9094/api/v2/alerts?&filter={alertname="TestAlert1"}`, token, "[]", 3*uwmLoadTime)
checkAlertNotExist(oc, "https://alertmanager-main.openshift-monitoring.svc:9094/api/v2/alerts", token, "TestAlert1", 3*uwmLoadTime)
exutil.By("get alertmanager pod names")
PodNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-ojsonpath={.items[*].metadata.name}", "-l", "app.kubernetes.io/name=alertmanager", "-n", "openshift-user-workload-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check alertmanager pod resources limits and requests")
for _, pod := range strings.Fields(PodNames) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", pod, `-ojsonpath={.spec.containers[?(@.name=="alertmanager")].resources.limits}`, "-n", "openshift-user-workload-monitoring").Output()
o.Expect(output).To(o.ContainSubstring(`"cpu":"100m","memory":"250Mi"`))
o.Expect(err).NotTo(o.HaveOccurred())
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", pod, `-ojsonpath={.spec.containers[?(@.name=="alertmanager")].resources.requests}`, "-n", "openshift-user-workload-monitoring").Output()
o.Expect(output).To(o.ContainSubstring(`"cpu":"40m","memory":"200Mi"`))
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("check alertmanager pod are bound pvcs")
for _, pod := range strings.Fields(PodNames) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", pod, "-ojsonpath={.spec.volumes[]}", "-n", "openshift-user-workload-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("uwm-alertmanager"))
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("check AlertmanagerConfigs are take effect")
for _, pod := range strings.Fields(PodNames) {
checkAlertmanagerConfig(oc, "openshift-user-workload-monitoring", pod, "api_url: http://wechatserver:8080/", true)
}
exutil.By("check logLevel is correctly set")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("alertmanager/user-workload", "-ojsonpath={.spec.logLevel}", "-n", "openshift-user-workload-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("debug"))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check logLevel is take effect")
for _, pod := range strings.Fields(PodNames) {
output, err = oc.AsAdmin().WithoutNamespace().Run("logs").Args("-c", "alertmanager", pod, "-n", "openshift-user-workload-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if !strings.Contains(output, "level=debug") {
e2e.Failf("logLevel is wrong or not take effect")
}
}
exutil.By("disable alertmanager in user-workload-monitoring-config")
//oc patch cm user-workload-monitoring-config -p '{"data": {"config.yaml": "alertmanager:\n enabled: false\n"}}' --type=merge -n openshift-user-workload-monitoring
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("cm", "user-workload-monitoring-config", "-p", `{"data": {"config.yaml": "alertmanager:\n enabled: false\n"}}`, "--type=merge", "-n", "openshift-user-workload-monitoring").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("should found user project alerts in platform alertmanager")
checkMetric(oc, `https://alertmanager-main.openshift-monitoring.svc:9094/api/v2/alerts`, token, "TestAlert1", 3*uwmLoadTime)
exutil.By("UWM alertmanager pod should disappear") //need time to wait pod fully terminated, put this step after the checkMetric
checkPodDeleted(oc, "openshift-user-workload-monitoring", "app.kubernetes.io/name=alertmanager", "alertmanager")
})
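// Illustrative sketch (assumption, not part of the original test): re-enabling the UWM
// Alertmanager would mirror the disable patch above with `enabled: true`. The function
// name is hypothetical; the oc wrapper calls follow the same pattern used in this file.
func reEnableUWMAlertmanagerSketch(oc *exutil.CLI) error {
	// revert the merge patch applied in the "disable alertmanager" step
	return oc.AsAdmin().WithoutNamespace().Run("patch").Args("cm", "user-workload-monitoring-config", "-p", `{"data": {"config.yaml": "alertmanager:\n enabled: true\n"}}`, "--type=merge", "-n", "openshift-user-workload-monitoring").Execute()
}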
| |||||
test case
|
openshift/openshift-tests-private
|
d70d7bd2-3b22-4c03-9e33-a45e3271a154
|
ConnectedOnly-Author:tagao-Medium-43286-Allow sending alerts to external Alertmanager for user workload monitoring components - enabled in-cluster alertmanager
|
['"path/filepath"', '"strings"', 'g "github.com/onsi/ginkgo/v2"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("ConnectedOnly-Author:tagao-Medium-43286-Allow sending alerts to external Alertmanager for user workload monitoring components - enabled in-cluster alertmanager", func() {
var (
testAlertmanager = filepath.Join(monitoringBaseDir, "example-alertmanager.yaml")
exampleAlert = filepath.Join(monitoringBaseDir, "example-alert-rule.yaml")
exampleAlert2 = filepath.Join(monitoringBaseDir, "leaf-prometheus-rule.yaml")
)
exutil.By("create alertmanager and set external alertmanager for prometheus/thanosRuler under openshift-user-workload-monitoring")
createResourceFromYaml(oc, "openshift-user-workload-monitoring", testAlertmanager)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("alertmanager", "test-alertmanager", "-n", "openshift-user-workload-monitoring").Execute()
exutil.By("check alertmanager pod is created")
alertmanagerTestPodCheck(oc)
exutil.By("skip case on disconnected cluster")
output, err := oc.AsAdmin().Run("get").Args("pod", "alertmanager-test-alertmanager-0", "-n", "openshift-user-workload-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("the pod condition: %s", output)
if output != "{}" && strings.Contains(output, "ImagePullBackOff") {
g.Skip("This case can not execute on a disconnected cluster!")
}
exutil.By("create example PrometheusRule under user namespace")
oc.SetupProject()
ns1 := oc.Namespace()
createResourceFromYaml(oc, ns1, exampleAlert)
exutil.By("create another user namespace then create PrometheusRule with leaf-prometheus label")
oc.SetupProject()
ns2 := oc.Namespace()
createResourceFromYaml(oc, ns2, exampleAlert2)
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("check the user alerts TestAlert1 and TestAlert2 are shown in \"in-cluster alertmanager\" API")
checkMetric(oc, `https://alertmanager-main.openshift-monitoring.svc:9094/api/v2/alerts?filter={alertname="TestAlert1"}`, token, "TestAlert1", uwmLoadTime)
checkMetric(oc, `https://alertmanager-main.openshift-monitoring.svc:9094/api/v2/alerts?filter={alertname="TestAlert1"}`, token, `"generatorURL":"https://console-openshift-console.`, uwmLoadTime)
checkMetric(oc, `https://alertmanager-main.openshift-monitoring.svc:9094/api/v2/alerts?filter={alertname="TestAlert2"}`, token, "TestAlert2", uwmLoadTime)
checkMetric(oc, `https://alertmanager-main.openshift-monitoring.svc:9094/api/v2/alerts?filter={alertname="TestAlert2"}`, token, `"generatorURL":"https://console-openshift-console.`, uwmLoadTime)
exutil.By("check the alerts are also sent to external alertmanager")
queryFromPod(oc, `http://alertmanager-operated.openshift-user-workload-monitoring.svc:9093/api/v2/alerts?filter={alertname="TestAlert1"}`, token, "openshift-user-workload-monitoring", "thanos-ruler-user-workload-0", "thanos-ruler", "TestAlert1", uwmLoadTime)
queryFromPod(oc, `http://alertmanager-operated.openshift-user-workload-monitoring.svc:9093/api/v2/alerts?filter={alertname="TestAlert1"}`, token, "openshift-user-workload-monitoring", "thanos-ruler-user-workload-0", "thanos-ruler", `"generatorURL":"https://console-openshift-console.`, uwmLoadTime)
queryFromPod(oc, `http://alertmanager-operated.openshift-user-workload-monitoring.svc:9093/api/v2/alerts?filter={alertname="TestAlert2"}`, token, "openshift-user-workload-monitoring", "thanos-ruler-user-workload-0", "thanos-ruler", "TestAlert2", uwmLoadTime)
queryFromPod(oc, `http://alertmanager-operated.openshift-user-workload-monitoring.svc:9093/api/v2/alerts?filter={alertname="TestAlert2"}`, token, "openshift-user-workload-monitoring", "thanos-ruler-user-workload-0", "thanos-ruler", `"generatorURL":"https://console-openshift-console.`, uwmLoadTime)
})
| |||||
test case
|
openshift/openshift-tests-private
|
6bea99b8-a199-4d12-9080-ba4abe9c9b6e
|
Author:tagao-ConnectedOnly-Medium-43311-Allow sending alerts to external Alertmanager for user workload monitoring components - disabled in-cluster alertmanager [Serial]
|
['"path/filepath"', '"strings"', 'g "github.com/onsi/ginkgo/v2"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-ConnectedOnly-Medium-43311-Allow sending alerts to external Alertmanager for user workload monitoring components - disabled in-cluster alertmanager [Serial]", func() {
var (
InClusterMonitoringCM = filepath.Join(monitoringBaseDir, "disLocalAlert-setExternalAlert-prometheus.yaml")
testAlertmanager = filepath.Join(monitoringBaseDir, "example-alertmanager.yaml")
exampleAlert = filepath.Join(monitoringBaseDir, "example-alert-rule.yaml")
)
exutil.By("Restore cluster monitoring stack default configuration")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("alertmanager", "test-alertmanager", "-n", "openshift-user-workload-monitoring", "--ignore-not-found").Execute()
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("disable local alertmanager and set external manager for prometheus")
createResourceFromYaml(oc, "openshift-monitoring", InClusterMonitoringCM)
exutil.By("create alertmanager and set external alertmanager for prometheus/thanosRuler under openshift-user-workload-monitoring")
createResourceFromYaml(oc, "openshift-user-workload-monitoring", testAlertmanager)
exutil.By("check alertmanager pod is created")
alertmanagerTestPodCheck(oc)
exutil.By("skip case on disconnected cluster")
cmCheck, _ := oc.AsAdmin().Run("get").Args("cm", "cluster-monitoring-config", "-n", "openshift-monitoring", "-ojson").Output()
poCheck, _ := oc.AsAdmin().Run("get").Args("pod", "-n", "openshift-monitoring").Output()
if !strings.Contains(cmCheck, "telemeter") && !strings.Contains(poCheck, "telemeter") {
g.Skip("This case can not execute on a disconnected cluster!")
}
exutil.By("create example PrometheusRule under user namespace")
oc.SetupProject()
ns1 := oc.Namespace()
createResourceFromYaml(oc, ns1, exampleAlert)
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("check the user alerts TestAlert1 and in-cluster Watchdog alerts are shown in \"thanos-querier\" API")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertname="TestAlert1"}'`, token, `TestAlert1`, 3*platformLoadTime)
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertname="Watchdog"}'`, token, `Watchdog`, 3*platformLoadTime)
exutil.By("check the alerts are also sent to external alertmanager, include the in-cluster and user project alerts")
queryFromPod(oc, `http://alertmanager-operated.openshift-user-workload-monitoring.svc:9093/api/v2/alerts?filter={alertname="TestAlert1"}`, token, "openshift-user-workload-monitoring", "thanos-ruler-user-workload-0", "thanos-ruler", "TestAlert1", 3*uwmLoadTime)
queryFromPod(oc, `http://alertmanager-operated.openshift-user-workload-monitoring.svc:9093/api/v2/alerts?filter={alertname="Watchdog"}`, token, "openshift-user-workload-monitoring", "thanos-ruler-user-workload-0", "thanos-ruler", "Watchdog", 3*uwmLoadTime)
})
| |||||
test case
|
openshift/openshift-tests-private
|
e6d5fcd0-19bf-4c5d-b082-605b1558c8d2
|
Author:tagao-ConnectedOnly-Medium-44815-Configure containers to honor the global tlsSecurityProfile
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-ConnectedOnly-Medium-44815-Configure containers to honor the global tlsSecurityProfile", func() {
exutil.By("get global tlsSecurityProfile")
// % oc get kubeapiservers.operator.openshift.io cluster -o jsonpath='{.spec.observedConfig.servingInfo.cipherSuites}'
cipherSuites, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("kubeapiservers.operator.openshift.io", "cluster", "-ojsonpath={.spec.observedConfig.servingInfo.cipherSuites}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
cipherSuitesFormat := strings.ReplaceAll(cipherSuites, "\"", "")
cipherSuitesFormat = strings.ReplaceAll(cipherSuitesFormat, "[", "")
cipherSuitesFormat = strings.ReplaceAll(cipherSuitesFormat, "]", "")
e2e.Logf("cipherSuites: %s", cipherSuitesFormat)
// % oc get kubeapiservers.operator.openshift.io cluster -o jsonpath='{.spec.observedConfig.servingInfo.minTLSVersion}'
minTLSVersion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("kubeapiservers.operator.openshift.io", "cluster", "-ojsonpath={.spec.observedConfig.servingInfo.minTLSVersion}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check tls-cipher-suites and tls-min-version for metrics-server under openshift-monitoring")
// % oc -n openshift-monitoring get deploy metrics-server -ojsonpath='{.spec.template.spec.containers[?(@tls-cipher-suites=)].args}'
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deploy", "metrics-server", "-ojsonpath={.spec.template.spec.containers[?(@tls-cipher-suites=)].args}", "-n", "openshift-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if !strings.Contains(output, cipherSuitesFormat) {
e2e.Failf("tls-cipher-suites is different from global setting! %s", output)
}
if !strings.Contains(output, minTLSVersion) {
e2e.Failf("tls-min-version is different from global setting! %s", output)
}
exutil.By("check tls-cipher-suites and tls-min-version for all pods which use kube-rbac-proxy container under openshift-monitoring/openshift-user-workload-monitoring")
//oc get pod -l app.kubernetes.io/name=alertmanager -n openshift-monitoring
alertmanagerPodNames, err := exutil.GetAllPodsWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/name=alertmanager")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range alertmanagerPodNames {
cmd := "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy\")].args}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, cipherSuitesFormat, true)
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, minTLSVersion, true)
cmd = "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy-metric\")].args}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, cipherSuitesFormat, true)
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, minTLSVersion, true)
}
//oc get pod -l app.kubernetes.io/name=node-exporter -n openshift-monitoring
nePodNames, err := exutil.GetAllPodsWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/name=node-exporter")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range nePodNames {
cmd := "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy\")].args}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, cipherSuitesFormat, true)
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, minTLSVersion, true)
}
//oc get pod -l app.kubernetes.io/name=kube-state-metrics -n openshift-monitoring
ksmPodNames, err := exutil.GetAllPodsWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/name=kube-state-metrics")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range ksmPodNames {
cmd := "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy-main\")].args}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, cipherSuitesFormat, true)
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, minTLSVersion, true)
cmd = "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy-self\")].args}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, cipherSuitesFormat, true)
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, minTLSVersion, true)
}
//oc get pod -l app.kubernetes.io/name=openshift-state-metrics -n openshift-monitoring
osmPodNames, err := exutil.GetAllPodsWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/name=openshift-state-metrics")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range osmPodNames {
cmd := "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy-main\")].args}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, cipherSuitesFormat, true)
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, minTLSVersion, true)
cmd = "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy-self\")].args}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, cipherSuitesFormat, true)
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, minTLSVersion, true)
}
//oc get pod -l app.kubernetes.io/name=prometheus -n openshift-monitoring
pk8sPodNames, err := exutil.GetAllPodsWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/name=prometheus")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range pk8sPodNames {
cmd := "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy\")].args}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, cipherSuitesFormat, true)
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, minTLSVersion, true)
cmd = "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy-thanos\")].args}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, cipherSuitesFormat, true)
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, minTLSVersion, true)
}
//oc get pod -l app.kubernetes.io/name=prometheus-operator -n openshift-monitoring
poPodNames, err := exutil.GetAllPodsWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/name=prometheus-operator")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range poPodNames {
cmd := "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy\")].args}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, cipherSuitesFormat, true)
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, minTLSVersion, true)
}
//oc get pod -l app.kubernetes.io/name=telemeter-client -n openshift-monitoring
tcPodNames, err := exutil.GetAllPodsWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/name=telemeter-client")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range tcPodNames {
cmd := "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy\")].args}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, cipherSuitesFormat, true)
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, minTLSVersion, true)
}
//oc get pod -l app.kubernetes.io/name=thanos-query -n openshift-monitoring
tqPodNames, err := exutil.GetAllPodsWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/name=thanos-query")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range tqPodNames {
cmd := "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy\")].args}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, cipherSuitesFormat, true)
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, minTLSVersion, true)
cmd = "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy-rules\")].args}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, cipherSuitesFormat, true)
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, minTLSVersion, true)
cmd = "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy-metrics\")].args}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, cipherSuitesFormat, true)
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, minTLSVersion, true)
}
//oc get pod -l app.kubernetes.io/name=prometheus-operator -n openshift-user-workload-monitoring
UWMpoPodNames, err := exutil.GetAllPodsWithLabel(oc, "openshift-user-workload-monitoring", "app.kubernetes.io/name=prometheus-operator")
// `UWMpoPodNames` should contain exactly one pod; more entries mean prometheus-operator pods are still being deleted
e2e.Logf("UWMpoPodNames: %v", UWMpoPodNames)
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range UWMpoPodNames {
cmd := "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy\")].args}"
checkYamlconfig(oc, "openshift-user-workload-monitoring", "pod", pod, cmd, cipherSuitesFormat, true)
checkYamlconfig(oc, "openshift-user-workload-monitoring", "pod", pod, cmd, minTLSVersion, true)
}
//oc get pod -l app.kubernetes.io/instance=user-workload -n openshift-user-workload-monitoring
UWMPodNames, err := exutil.GetAllPodsWithLabel(oc, "openshift-user-workload-monitoring", "app.kubernetes.io/instance=user-workload")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range UWMPodNames {
// multiple kube-rbac-* containers exist under this label, so use a fuzzy query
cmd := "-ojsonpath={.spec.containers[?(@tls-cipher-suites)].args}"
checkYamlconfig(oc, "openshift-user-workload-monitoring", "pod", pod, cmd, cipherSuitesFormat, true)
checkYamlconfig(oc, "openshift-user-workload-monitoring", "pod", pod, cmd, minTLSVersion, true)
}
})
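// Illustrative sketch (assumption, not the original test code): the repeated per-label
// checks above could be collapsed into one table-driven loop. The label selectors and
// container jsonpaths are copied from the steps above; the function and variable names
// are hypothetical.
func checkKubeRbacProxyTLSSketch(oc *exutil.CLI, cipherSuitesFormat, minTLSVersion string) {
	checks := []struct {
		namespace string
		label     string
		jsonpath  string
	}{
		{"openshift-monitoring", "app.kubernetes.io/name=node-exporter", `-ojsonpath={.spec.containers[?(@.name=="kube-rbac-proxy")].args}`},
		{"openshift-monitoring", "app.kubernetes.io/name=prometheus-operator", `-ojsonpath={.spec.containers[?(@.name=="kube-rbac-proxy")].args}`},
		{"openshift-user-workload-monitoring", "app.kubernetes.io/name=prometheus-operator", `-ojsonpath={.spec.containers[?(@.name=="kube-rbac-proxy")].args}`},
	}
	for _, c := range checks {
		pods, err := exutil.GetAllPodsWithLabel(oc, c.namespace, c.label)
		o.Expect(err).NotTo(o.HaveOccurred())
		for _, pod := range pods {
			// both the cipher suites and the minimum TLS version must match the global profile
			checkYamlconfig(oc, c.namespace, "pod", pod, c.jsonpath, cipherSuitesFormat, true)
			checkYamlconfig(oc, c.namespace, "pod", pod, c.jsonpath, minTLSVersion, true)
		}
	}
}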
| |||||
test case
|
openshift/openshift-tests-private
|
0223a3dd-de7a-42b9-823f-8f6db454673a
|
Author:tagao-LEVEL0-Medium-68237-Add the trusted CA bundle in the Prometheus user workload monitoring pods
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-LEVEL0-Medium-68237-Add the trusted CA bundle in the Prometheus user workload monitoring pods", func() {
exutil.By("confirm UWM pod is ready")
exutil.AssertPodToBeReady(oc, "prometheus-user-workload-0", "openshift-user-workload-monitoring")
exutil.By("check configmap under namespace: openshift-user-workload-monitoring")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm", "-n", "openshift-user-workload-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("prometheus-user-workload-trusted-ca-bundle"))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check the trusted CA bundle is applied to the pod")
PodNames, err := exutil.GetAllPodsWithLabel(oc, "openshift-user-workload-monitoring", "app.kubernetes.io/name=prometheus")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range PodNames {
cmd := "-ojsonpath={.spec.containers[?(@.name==\"prometheus\")].volumeMounts}"
checkYamlconfig(oc, "openshift-user-workload-monitoring", "pod", pod, cmd, "prometheus-user-workload-trusted-ca-bundle", true)
cmd = "-ojsonpath={.spec.volumes[?(@.name==\"prometheus-user-workload-trusted-ca-bundle\")]}"
checkYamlconfig(oc, "openshift-user-workload-monitoring", "pod", pod, cmd, "prometheus-user-workload-trusted-ca-bundle", true)
}
})
| ||||||
test case
|
openshift/openshift-tests-private
|
b6bbf0eb-a580-48a0-8213-04fb379addfa
|
Author:tagao-Medium-69084-user workLoad components failures leading to CMO degradation/unavailability should be easy to identify [Slow] [Disruptive]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Medium-69084-user workLoad components failures leading to CMO degradation/unavailability should be easy to identify [Slow] [Disruptive]", func() {
var (
UserWorkloadTasksFailed = filepath.Join(monitoringBaseDir, "UserWorkloadTasksFailed.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("trigger UserWorkloadTasksFailed")
createResourceFromYaml(oc, "openshift-user-workload-monitoring", UserWorkloadTasksFailed)
exutil.By("check logs in CMO should see UserWorkloadTasksFailed")
CMOPodName, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-monitoring", "-l", "app.kubernetes.io/name=cluster-monitoring-operator", "-ojsonpath={.items[].metadata.name}").Output()
exutil.WaitAndGetSpecificPodLogs(oc, "openshift-monitoring", "cluster-monitoring-operator", CMOPodName, "UserWorkloadTasksFailed")
})
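// Illustrative sketch (assumption, not in the original test): besides the CMO pod logs,
// the failure should also surface on the monitoring ClusterOperator's Degraded condition.
// The function name is hypothetical; the oc call pattern matches the rest of this file.
func getMonitoringDegradedConditionSketch(oc *exutil.CLI) (string, error) {
	// return the Degraded condition of the monitoring ClusterOperator as JSON
	return oc.AsAdmin().WithoutNamespace().Run("get").Args("co", "monitoring", `-ojsonpath={.status.conditions[?(@.type=="Degraded")]}`).Output()
}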
| |||||
test case
|
openshift/openshift-tests-private
|
418ef7f1-24f8-47f3-8dd6-e1ca71efecdd
|
Author:tagao-Medium-73112-replace OAuth proxy for Thanos Ruler
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Medium-73112-replace OAuth proxy for Thanos Ruler", func() {
exutil.By("check new secret thanos-user-workload-kube-rbac-proxy-web added")
exutil.AssertPodToBeReady(oc, "prometheus-user-workload-0", "openshift-user-workload-monitoring")
checkSecret, err := oc.AsAdmin().Run("get").Args("secret", "thanos-user-workload-kube-rbac-proxy-web", "-n", "openshift-user-workload-monitoring").Output()
o.Expect(checkSecret).NotTo(o.ContainSubstring("not found"))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check old secret thanos-ruler-oauth-cookie removed")
checkSecret, _ = oc.AsAdmin().Run("get").Args("secret", "thanos-ruler-oauth-cookie", "-n", "openshift-user-workload-monitoring").Output()
o.Expect(checkSecret).To(o.ContainSubstring("not found"))
exutil.By("check thanos-ruler sa, `annotations` should be removed")
checkSa, err := oc.AsAdmin().Run("get").Args("sa", "thanos-ruler", "-n", "openshift-user-workload-monitoring", "-ojsonpath={.metadata.annotations}").Output()
o.Expect(checkSa).NotTo(o.ContainSubstring("Route"))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check thanos-ruler-user-workload pods, thanos-ruler-proxy container is removed")
checkPO, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "thanos-ruler-user-workload-0", "-ojsonpath={.spec.containers[*].name}", "-n", "openshift-user-workload-monitoring").Output()
o.Expect(checkPO).NotTo(o.ContainSubstring("thanos-ruler-proxy"))
o.Expect(checkPO).To(o.ContainSubstring("kube-rbac-proxy-web"))
exutil.By("check ThanosRuler, new configs added")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("ThanosRuler", "user-workload", "-n", "openshift-user-workload-monitoring", "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy-web\")].args}").Output()
o.Expect(output).To(o.ContainSubstring("config-file=/etc/kube-rbac-proxy/config.yaml"))
o.Expect(output).To(o.ContainSubstring("tls-cert-file=/etc/tls/private/tls.crt"))
o.Expect(output).To(o.ContainSubstring("tls-private-key-file=/etc/tls/private/tls.key"))
})
| ||||||
test case
|
openshift/openshift-tests-private
|
1a679ca6-8fe9-4291-96a8-5a8df1699724
|
Author:tagao-High-73213-Enable controller id for CMO Prometheus resources [Serial]
|
['"path/filepath"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-High-73213-Enable controller id for CMO Prometheus resources [Serial]", func() {
var (
uwmEnableAlertmanager = filepath.Join(monitoringBaseDir, "uwm-enableAlertmanager.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("enable alertmanager for uwm")
createResourceFromYaml(oc, "openshift-user-workload-monitoring", uwmEnableAlertmanager)
exutil.By("wait for all pods ready")
exutil.AssertPodToBeReady(oc, "prometheus-user-workload-0", "openshift-user-workload-monitoring")
exutil.AssertPodToBeReady(oc, "alertmanager-user-workload-0", "openshift-user-workload-monitoring")
exutil.AssertPodToBeReady(oc, "thanos-ruler-user-workload-0", "openshift-user-workload-monitoring")
exutil.By("check alertmanager controller-id")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("alertmanager", "main", "-n", "openshift-monitoring", "-ojsonpath={.metadata.annotations}").Output()
o.Expect(output).To(o.ContainSubstring(`"operator.prometheus.io/controller-id":"openshift-monitoring/prometheus-operator"`))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check UWM alertmanager controller-id")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("alertmanager", "user-workload", "-n", "openshift-user-workload-monitoring", "-ojsonpath={.metadata.annotations}").Output()
o.Expect(output).To(o.ContainSubstring(`"operator.prometheus.io/controller-id":"openshift-user-workload-monitoring/prometheus-operator"`))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check prometheus k8s controller-id")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("prometheus", "k8s", "-n", "openshift-monitoring", "-ojsonpath={.metadata.annotations}").Output()
o.Expect(output).To(o.ContainSubstring(`"operator.prometheus.io/controller-id":"openshift-monitoring/prometheus-operator"`))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check prometheus-operator deployment controller-id")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deploy", "prometheus-operator", "-n", "openshift-monitoring", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"prometheus-operator\")].args}").Output()
o.Expect(output).To(o.ContainSubstring(`"--controller-id=openshift-monitoring/prometheus-operator"`))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check UWM prometheus-operator deployment controller-id")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deploy", "prometheus-operator", "-n", "openshift-user-workload-monitoring", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"prometheus-operator\")].args}").Output()
o.Expect(output).To(o.ContainSubstring(`"--controller-id=openshift-user-workload-monitoring/prometheus-operator"`))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check UWM prometheus user-workload controller-id")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("prometheus", "user-workload", "-n", "openshift-user-workload-monitoring", "-ojsonpath={.metadata.annotations}").Output()
o.Expect(output).To(o.ContainSubstring(`"operator.prometheus.io/controller-id":"openshift-user-workload-monitoring/prometheus-operator"`))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check ThanosRuler user-workload controller-id")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("ThanosRuler", "user-workload", "-n", "openshift-user-workload-monitoring", "-ojsonpath={.metadata.annotations}").Output()
o.Expect(output).To(o.ContainSubstring(`"operator.prometheus.io/controller-id":"openshift-user-workload-monitoring/prometheus-operator"`))
o.Expect(err).NotTo(o.HaveOccurred())
})
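// Illustrative sketch (assumption, not the original test code): the repeated controller-id
// annotation checks on the CRs could be expressed as one loop; resource kinds, names, and
// namespaces are copied from the steps above, and the expected annotation value is
// "<namespace>/prometheus-operator" in every case.
func checkControllerIDAnnotationsSketch(oc *exutil.CLI) {
	resources := []struct{ kind, name, namespace string }{
		{"alertmanager", "main", "openshift-monitoring"},
		{"alertmanager", "user-workload", "openshift-user-workload-monitoring"},
		{"prometheus", "k8s", "openshift-monitoring"},
		{"prometheus", "user-workload", "openshift-user-workload-monitoring"},
		{"ThanosRuler", "user-workload", "openshift-user-workload-monitoring"},
	}
	for _, r := range resources {
		output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(r.kind, r.name, "-n", r.namespace, "-ojsonpath={.metadata.annotations}").Output()
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(output).To(o.ContainSubstring(`"operator.prometheus.io/controller-id":"` + r.namespace + `/prometheus-operator"`))
	}
}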
| |||||
test case
|
openshift/openshift-tests-private
|
cad121f7-bf03-4a17-aa15-472b435a6d21
|
Author:juzhao-Low-73684-UWM statefulset should not lack serviceName
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:juzhao-Low-73684-UWM statefulset should not lack serviceName", func() {
exutil.By("check spec.serviceName for UWM statefulset")
cmd := "-ojsonpath={.spec.serviceName}}"
checkYamlconfig(oc, "openshift-user-workload-monitoring", "statefulset", "prometheus-user-workload", cmd, "prometheus-operated", true)
checkYamlconfig(oc, "openshift-user-workload-monitoring", "statefulset", "thanos-ruler-user-workload", cmd, "thanos-ruler-operated", true)
})
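// Illustrative sketch (assumption, not part of the original test): the same serviceName
// check expressed with the raw oc wrapper instead of checkYamlconfig; the resource names
// are taken from the step above and the function name is hypothetical.
func checkUWMStatefulsetServiceNameSketch(oc *exutil.CLI) {
	out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("statefulset", "prometheus-user-workload", "-n", "openshift-user-workload-monitoring", "-ojsonpath={.spec.serviceName}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(out).To(o.ContainSubstring("prometheus-operated"))
}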
| ||||||
test case
|
openshift/openshift-tests-private
|
143e0f2f-1c05-4227-ac55-dcf51dac8733
|
Author:tagao-Medium-73734-Add ownership annotation for certificates [Serial]
|
['"path/filepath"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Medium-73734-Add ownership annotation for certificates [Serial]", func() {
var (
uwmEnableAlertmanager = filepath.Join(monitoringBaseDir, "uwm-enableAlertmanager.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("enable alertmanager for uwm")
createResourceFromYaml(oc, "openshift-user-workload-monitoring", uwmEnableAlertmanager)
exutil.By("check annotations added to the CM under the namespace openshift-monitoring")
cmd := "-ojsonpath={.metadata.annotations}"
checkYamlconfig(oc, "openshift-monitoring", "cm", "alertmanager-trusted-ca-bundle", cmd, `"openshift.io/owning-component":"Monitoring"`, true)
checkYamlconfig(oc, "openshift-monitoring", "cm", "kubelet-serving-ca-bundle", cmd, `"openshift.io/owning-component":"Monitoring"`, true)
checkYamlconfig(oc, "openshift-monitoring", "cm", "prometheus-trusted-ca-bundle", cmd, `"openshift.io/owning-component":"Monitoring"`, true)
telemeterPod, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-l", "app.kubernetes.io/name=telemeter-client", "-n", "openshift-monitoring").Output()
if strings.Contains(telemeterPod, "telemeter-client") {
checkYamlconfig(oc, "openshift-monitoring", "cm", "telemeter-trusted-ca-bundle", cmd, `"openshift.io/owning-component":"Monitoring"`, true)
}
exutil.By("check annotations added to the CM under the namespace openshift-user-workload-monitoring")
checkYamlconfig(oc, "openshift-user-workload-monitoring", "cm", "prometheus-user-workload-trusted-ca-bundle", cmd, `"openshift.io/owning-component":"Monitoring"`, true)
checkYamlconfig(oc, "openshift-user-workload-monitoring", "cm", "alertmanager-trusted-ca-bundle", cmd, `"openshift.io/owning-component":"Monitoring"`, true)
})
| |||||
test case
|
openshift/openshift-tests-private
|
6a0b0f56-7a6d-4792-a831-abc9122d68a1
|
Author:juzhao-Medium-75489-Set scrape.timestamp tolerance for UWM prometheus
|
['"context"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:juzhao-Medium-75489-Set scrape.timestamp tolerance for UWM prometheus", func() {
exutil.By("confirm for UWM prometheus created")
err := wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 180*time.Second, false, func(context.Context) (bool, error) {
prometheus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("prometheus", "user-workload", "-n", "openshift-user-workload-monitoring").Output()
if err != nil || strings.Contains(prometheus, "not found") {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, "UWM prometheus not created")
exutil.By("check for UWM prometheus scrape.timestamp tolerance")
cmd := `-ojsonpath={.spec.additionalArgs[?(@.name=="scrape.timestamp-tolerance")]}`
checkYamlconfig(oc, "openshift-user-workload-monitoring", "prometheus", "user-workload", cmd, `"value":"15ms"`, true)
exutil.By("check settings in UWM prometheus pods")
podNames, err := exutil.GetAllPodsWithLabel(oc, "openshift-user-workload-monitoring", "app.kubernetes.io/name=prometheus")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range podNames {
cmd := "-ojsonpath={.spec.containers[?(@.name==\"prometheus\")].args}"
checkYamlconfig(oc, "openshift-user-workload-monitoring", "pod", pod, cmd, `--scrape.timestamp-tolerance=15ms`, true)
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
50aad6e2-7611-42d3-b847-02e2df27dcde
|
Author:tagao-High-75384-cross-namespace rules for user-workload monitoring [Serial]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-High-75384-cross-namespace rules for user-workload monitoring [Serial]", func() {
var (
example_cross_ns_alert = filepath.Join(monitoringBaseDir, "example_cross_ns_alert.yaml")
disable_uwm_cross_ns_rules = filepath.Join(monitoringBaseDir, "disable_uwm_cross_ns_rules.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of the case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("Create a user-monitoring-shared namespace and deploy PrometheusRule")
oc.SetupProject()
ns := oc.Namespace()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", "ns-monitoring-75384", "--ignore-not-found").Execute()
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("namespace", "ns-monitoring-75384").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
createResourceFromYaml(oc, "ns-monitoring-75384", example_cross_ns_alert)
exutil.By("check namespace have expect label")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ns", ns, "-ojsonpath={.metadata.labels}").Output()
o.Expect(output).To(o.ContainSubstring(`"pod-security.kubernetes.io/enforce":"restricted"`))
o.Expect(err).NotTo(o.HaveOccurred())
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("ns", "ns-monitoring-75384", "-ojsonpath={.metadata.labels}").Output()
o.Expect(output).To(o.ContainSubstring(`"pod-security.kubernetes.io/enforce":"restricted"`))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check metrics")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertname="TestAlert1", namespace="ns-monitoring-75384"}'`, token, `"namespace":"ns-monitoring-75384"`, 2*uwmLoadTime)
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertname="TestAlert1", namespace="`+ns+`"}'`, token, `"namespace":"`+ns+`"`, 2*uwmLoadTime)
exutil.By("disable the feature")
createResourceFromYaml(oc, "openshift-monitoring", disable_uwm_cross_ns_rules)
exutil.By("check the alert should not share across the namespace")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertname="TestAlert1", namespace="`+ns+`"}'`, token, `"result":[]`, 2*uwmLoadTime)
})
| |||||
test case
|
openshift/openshift-tests-private
|
5064c4da-8bb3-4790-bfbf-3efe7e3fb023
|
Author:hongyli-Critical-43341-Exclude namespaces from user workload monitoring based on label
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:hongyli-Critical-43341-Exclude namespaces from user workload monitoring based on label", func() {
var (
exampleAppRule = filepath.Join(monitoringBaseDir, "example-alert-rule.yaml")
)
exutil.By("label project not being monitored")
labelNameSpace(oc, ns, "openshift.io/user-monitoring=false")
//create example app and alert rule under the project
exutil.By("Create example alert rule!")
createResourceFromYaml(oc, ns, exampleAppRule)
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("check metrics")
checkMetric(oc, "https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=version{namespace=\""+ns+"\"}'", token, "\"result\":[]", 2*uwmLoadTime)
exutil.By("check alerts")
checkMetric(oc, "https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{namespace=\""+ns+"\"}'", token, "\"result\":[]", 2*uwmLoadTime)
exutil.By("label project being monitored")
labelNameSpace(oc, ns, "openshift.io/user-monitoring=true")
exutil.By("check metrics")
checkMetric(oc, "https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=version{namespace=\""+ns+"\"}'", token, "prometheus-example-app", 2*uwmLoadTime)
exutil.By("check alerts")
checkMetric(oc, "https://thanos-ruler.openshift-user-workload-monitoring.svc:9091/api/v1/alerts", token, "TestAlert", 2*uwmLoadTime)
})
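// Illustrative sketch (assumption, not in the original test): removing the label entirely
// should also re-enable monitoring for the namespace, since only an explicit
// openshift.io/user-monitoring=false opts a namespace out. The trailing "-" is the
// standard oc/kubectl syntax for label removal; labelNameSpace is the helper defined in
// monitoring_utils.go and the function name here is hypothetical.
func removeUserMonitoringLabelSketch(oc *exutil.CLI, ns string) {
	labelNameSpace(oc, ns, "openshift.io/user-monitoring-")
}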
| |||||
test case
|
openshift/openshift-tests-private
|
b59234b7-e7a0-404c-a380-7f002832f080
|
Author:hongyli-High-50024-High-49515-Check federate route and service of user workload Prometheus
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:hongyli-High-50024-High-49515-Check federate route and service of user workload Prometheus", func() {
var err error
exutil.By("Bind cluster-monitoring-view RBAC to default service account")
uwmFederateRBACViewName := "uwm-federate-rbac-" + ns
defer deleteBindMonitoringViewRoleToDefaultSA(oc, uwmFederateRBACViewName)
clusterRoleBinding, err := bindMonitoringViewRoleToDefaultSA(oc, ns, uwmFederateRBACViewName)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Created: %v %v", "ClusterRoleBinding", clusterRoleBinding.Name)
exutil.By("Get token of default service account")
token := getSAToken(oc, "default", ns)
exutil.By("check uwm federate endpoint service")
checkMetric(oc, "https://prometheus-user-workload.openshift-user-workload-monitoring.svc:9092/federate --data-urlencode 'match[]=version'", token, "prometheus-example-app", 2*uwmLoadTime)
exutil.By("check uwm federate route")
checkRoute(oc, "openshift-user-workload-monitoring", "federate", token, "match[]=version", "prometheus-example-app", 100)
})
| ||||||
test case
|
openshift/openshift-tests-private
|
91304290-d794-47fc-a3ce-3647059ab681
|
Author:tagao-Medium-50241-Prometheus (uwm) externalLabels not showing always in alerts
|
['"path/filepath"', 'g "github.com/onsi/ginkgo/v2"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Medium-50241-Prometheus (uwm) externalLabels not showing always in alerts", func() {
var (
exampleAppRule = filepath.Join(monitoringBaseDir, "in-cluster_query_alert_rule.yaml")
)
exutil.By("Create alert rule with expression about data provided by in-cluster prometheus")
createResourceFromYaml(oc, ns, exampleAppRule)
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("Check labelmy is in the alert")
checkMetric(oc, "https://alertmanager-main.openshift-monitoring.svc:9094/api/v2/alerts", token, "labelmy", 2*uwmLoadTime)
})
| |||||
test case
|
openshift/openshift-tests-private
|
7f333834-d163-41ce-9ae2-9a263c657175
|
Author:tagao-Medium-42825-Expose EnforcedTargetLimit in the CMO configuration for UWM
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Medium-42825-Expose EnforcedTargetLimit in the CMO configuration for UWM", func() {
exutil.By("check user metrics")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, "https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=version{namespace=\""+ns+"\"}'", token, "prometheus-example-app", 2*uwmLoadTime)
exutil.By("scale deployment replicas to 2")
oc.WithoutNamespace().Run("scale").Args("deployment", "prometheus-example-app", "--replicas=2", "-n", ns).Execute()
exutil.By("check user metrics again, the user metrics can't be found from thanos-querier")
checkMetric(oc, "https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=version{namespace=\""+ns+"\"}'", token, "\"result\":[]", 2*uwmLoadTime)
})
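// Illustrative sketch (assumption, not part of the original test): scaling the example app
// back to a single replica should bring the scrape job under the enforced target limit
// again, after which the version metric is expected to become queryable. The function name
// is hypothetical; the calls reuse patterns from the test above.
func scaleBackAndRecheckSketch(oc *exutil.CLI, ns, token string) {
	err := oc.WithoutNamespace().Run("scale").Args("deployment", "prometheus-example-app", "--replicas=1", "-n", ns).Execute()
	o.Expect(err).NotTo(o.HaveOccurred())
	checkMetric(oc, "https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=version{namespace=\""+ns+"\"}'", token, "prometheus-example-app", 2*uwmLoadTime)
}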
| ||||||
test case
|
openshift/openshift-tests-private
|
94187165-9766-4786-9d21-8cfbb75e28c8
|
Author:tagao-Medium-49189-Enforce label scrape limits for UWM [Serial]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Medium-49189-Enforce label scrape limits for UWM [Serial]", func() {
var (
invalidUWM = filepath.Join(monitoringBaseDir, "invalid-uwm.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("query metrics from thanos-querier")
checkMetric(oc, "https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=version'", token, "prometheus-example-app", uwmLoadTime)
exutil.By("trigger label_limit exceed")
createResourceFromYaml(oc, "openshift-user-workload-monitoring", invalidUWM)
exutil.By("check in thanos-querier /targets api, it should complains the label_limit exceeded")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/targets`, token, `label_limit exceeded`, 2*uwmLoadTime)
exutil.By("trigger label_name_length_limit exceed")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("cm", "user-workload-monitoring-config", "-p", `{"data": {"config.yaml": "prometheus:\n enforcedLabelLimit: 8\n enforcedLabelNameLengthLimit: 1\n enforcedLabelValueLengthLimit: 1\n"}}`, "--type=merge", "-n", "openshift-user-workload-monitoring").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check in thanos-querier /targets api, it should complains the label_name_length_limit exceeded")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/targets`, token, `label_name_length_limit exceeded`, 2*uwmLoadTime)
exutil.By("trigger label_value_length_limit exceed")
err2 := oc.AsAdmin().WithoutNamespace().Run("patch").Args("cm", "user-workload-monitoring-config", "-p", `{"data": {"config.yaml": "prometheus:\n enforcedLabelLimit: 8\n enforcedLabelNameLengthLimit: 8\n enforcedLabelValueLengthLimit: 1\n"}}`, "--type=merge", "-n", "openshift-user-workload-monitoring").Execute()
o.Expect(err2).NotTo(o.HaveOccurred())
exutil.By("check in thanos-querier /targets api, it should complains the label_value_length_limit exceeded")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/targets`, token, `label_value_length_limit exceeded`, 2*uwmLoadTime)
exutil.By("relax restrictions")
err3 := oc.AsAdmin().WithoutNamespace().Run("patch").Args("cm", "user-workload-monitoring-config", "-p", `{"data": {"config.yaml": "prometheus:\n enforcedLabelLimit: 10\n enforcedLabelNameLengthLimit: 10\n enforcedLabelValueLengthLimit: 50\n"}}`, "--type=merge", "-n", "openshift-user-workload-monitoring").Execute()
o.Expect(err3).NotTo(o.HaveOccurred())
exutil.By("able to see the metrics")
checkMetric(oc, "https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=version'", token, "prometheus-example-app", 2*uwmLoadTime)
})
| |||||
test case
|
openshift/openshift-tests-private
|
9f0439ff-08ea-44b2-811b-548d08abe022
|
Author:tagao-Medium-44805-Expose tenancy-aware labels and values of api v1 label endpoints for Thanos query
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Medium-44805-Expose tenancy-aware labels and values of api v1 label endpoints for Thanos query", func() {
var (
rolebinding = filepath.Join(monitoringBaseDir, "rolebinding.yaml")
)
exutil.By("add RoleBinding to specific user")
createResourceFromYaml(oc, ns, rolebinding)
//oc -n ns1 patch RoleBinding view -p '{"subjects":[{"apiGroup":"rbac.authorization.k8s.io","kind":"User","name":"${user}"}]}'
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("RoleBinding", "view", "-p", `{"subjects":[{"apiGroup":"rbac.authorization.k8s.io","kind":"User","name":"`+oc.Username()+`"}]}`, "--type=merge", "-n", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("get user API token")
token := oc.UserConfig().BearerToken
exutil.By("check namespace labels") //There are many labels, only check the few ones
checkMetric(oc, "\"https://thanos-querier.openshift-monitoring.svc:9092/api/v1/labels?namespace="+oc.Namespace()+"\"", token, `"__name__"`, 2*uwmLoadTime)
checkMetric(oc, "\"https://thanos-querier.openshift-monitoring.svc:9092/api/v1/labels?namespace="+oc.Namespace()+"\"", token, `"version"`, 2*uwmLoadTime)
checkMetric(oc, "\"https://thanos-querier.openshift-monitoring.svc:9092/api/v1/labels?namespace="+oc.Namespace()+"\"", token, `"cluster_ip"`, 2*uwmLoadTime)
exutil.By("show label value")
checkMetric(oc, "\"https://thanos-querier.openshift-monitoring.svc:9092/api/v1/label/version/values?namespace="+oc.Namespace()+"\"", token, `"v0.4.1"`, 2*uwmLoadTime)
exutil.By("check with a specific series")
checkMetric(oc, "\"https://thanos-querier.openshift-monitoring.svc:9092/api/v1/series?match[]=version&namespace="+oc.Namespace()+"\"", token, `"service":"prometheus-example-app"`, 2*uwmLoadTime)
})
| |||||
test case
|
openshift/openshift-tests-private
|
c66b0c31-41dc-4024-848d-e56182a652c9
|
Author:tagao-High-73151-Update Prometheus user-workload to enable additional scrape metrics [Serial]
|
['"path/filepath"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-High-73151-Update Prometheus user-workload to enable additional scrape metrics [Serial]", func() {
var (
exampleApp2 = filepath.Join(monitoringBaseDir, "example-app-2-sampleLimit.yaml")
approachingEnforcedSamplesLimit = filepath.Join(monitoringBaseDir, "approachingEnforcedSamplesLimit.yaml")
)
exutil.By("restore monitoring config")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("PrometheusRule", "monitoring-stack-alerts", "-n", ns).Execute()
exutil.By("create example-app2")
// example-app2 has a sampleLimit set and should be created in the same namespace as example-app
createResourceFromYaml(oc, ns, exampleApp2)
exutil.By("wait for pod ready")
exutil.AssertPodToBeReady(oc, "prometheus-user-workload-0", "openshift-user-workload-monitoring")
exutil.By("check extra-scrape-metrics added to uwm prometheus")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("prometheus", "user-workload", "-ojsonpath={.spec.enableFeatures}", "-n", "openshift-user-workload-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("extra-scrape-metrics"))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("set up the alert rules")
createResourceFromYaml(oc, ns, approachingEnforcedSamplesLimit)
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("check metrics")
exampleAppPods, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns).Output()
e2e.Logf("pods condition under ns:\n%s", exampleAppPods)
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=(scrape_sample_limit == 1)'`, token, "prometheus-example-app-2", uwmLoadTime)
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertname="ApproachingEnforcedSamplesLimit"}'`, token, `"prometheus-example-app-2"`, uwmLoadTime)
})
| |||||
file
|
openshift/openshift-tests-private
|
db1a0cea-fc15-466a-874d-35097769a9b0
|
monitoring_utils
|
import (
"context"
"fmt"
"math/rand"
"os/exec"
"strconv"
"strings"
"time"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
package monitoring
import (
"context"
"fmt"
"math/rand"
"os/exec"
"strconv"
"strings"
"time"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
const platformLoadTime = 120
const uwmLoadTime = 180
type monitoringConfig struct {
name string
namespace string
enableUserWorkload bool
template string
}
func (cm *monitoringConfig) create(oc *exutil.CLI) {
if !checkConfigMap(oc, "openshift-monitoring", "cluster-monitoring-config") {
e2e.Logf("Create configmap: cluster-monitoring-config")
output, err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", cm.template, "-p", "NAME="+cm.name, "NAMESPACE="+cm.namespace, "ENABLEUSERWORKLOAD="+fmt.Sprintf("%v", cm.enableUserWorkload))
if err != nil {
if strings.Contains(output, "AlreadyExists") {
err = nil
}
}
o.Expect(err).NotTo(o.HaveOccurred())
}
}
func createUWMConfig(oc *exutil.CLI, uwmMonitoringConfig string) {
if !checkConfigMap(oc, "openshift-user-workload-monitoring", "user-workload-monitoring-config") {
e2e.Logf("Create configmap: user-workload-monitoring-config")
output, err := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", uwmMonitoringConfig).Output()
if err != nil {
if strings.Contains(output, "AlreadyExists") {
err = nil
}
}
o.Expect(err).NotTo(o.HaveOccurred())
}
}
// check if a configmap is created in specific namespace [usage: checkConfigMap(oc, namespace, configmapName)]
func checkConfigMap(oc *exutil.CLI, ns, configmapName string) bool {
searchOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm", configmapName, "-n", ns, "-o=jsonpath={.data.config\\.yaml}").Output()
if err != nil {
return false
}
if strings.Contains(searchOutput, "retention") {
return true
}
return false
}
func getRandomString() string {
chars := "abcdefghijklmnopqrstuvwxyz0123456789"
seed := rand.New(rand.NewSource(time.Now().UnixNano()))
buffer := make([]byte, 8)
for index := range buffer {
buffer[index] = chars[seed.Intn(len(chars))]
}
return string(buffer)
}
// create a resource from a template by processing it with `oc process` and applying the output
func applyResourceFromTemplate(oc *exutil.CLI, parameters ...string) (string, error) {
var configFile string
err := wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 15*time.Second, false, func(context.Context) (bool, error) {
output, err := oc.AsAdmin().Run("process").Args(parameters...).OutputToFile(getRandomString() + "cluster-monitoring.json")
if err != nil {
return false, nil
}
configFile = output
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to process %v", parameters))
return oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", configFile).Output()
}
func labelNameSpace(oc *exutil.CLI, namespace string, label string) {
err := oc.AsAdmin().WithoutNamespace().Run("label").Args("namespace", namespace, label, "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The namespace %s is labeled by %q", namespace, label)
}
func getSAToken(oc *exutil.CLI, account, ns string) string {
e2e.Logf("Getting a token assigned to specific serviceaccount from %s namespace...", ns)
token, err := oc.AsAdmin().WithoutNamespace().Run("create").Args("token", account, "-n", ns).Output()
if err != nil {
if strings.Contains(token, "unknown command") {
token, err = oc.AsAdmin().WithoutNamespace().Run("sa").Args("get-token", account, "-n", ns).Output()
}
}
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(token).NotTo(o.BeEmpty())
return token
}
// check data by running curl on a pod
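// the timeout argument is interpreted as a number of seconds: the poll below multiplies it
// by time.Second, so callers pass plain numeric constants such as 2*uwmLoadTime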
func checkMetric(oc *exutil.CLI, url, token, metricString string, timeout time.Duration) {
var metrics string
var err error
getCmd := "curl -G -k -s -H \"Authorization:Bearer " + token + "\" " + url
err = wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, timeout*time.Second, false, func(context.Context) (bool, error) {
metrics, err = exutil.RemoteShPod(oc, "openshift-monitoring", "prometheus-k8s-0", "sh", "-c", getCmd)
if err != nil || !strings.Contains(metrics, metricString) {
return false, nil
}
return true, err
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The metrics %s failed to contain %s", metrics, metricString))
}
func createResourceFromYaml(oc *exutil.CLI, ns, yamlFile string) {
err := oc.AsAdmin().Run("apply").Args("-n", ns, "-f", yamlFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
func deleteBindMonitoringViewRoleToDefaultSA(oc *exutil.CLI, uwmFederateRBACViewName string) {
err := oc.AdminKubeClient().RbacV1().ClusterRoleBindings().Delete(context.Background(), uwmFederateRBACViewName, metav1.DeleteOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
}
func bindMonitoringViewRoleToDefaultSA(oc *exutil.CLI, ns, uwmFederateRBACViewName string) (*rbacv1.ClusterRoleBinding, error) {
return oc.AdminKubeClient().RbacV1().ClusterRoleBindings().Create(context.Background(), &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: uwmFederateRBACViewName,
},
RoleRef: rbacv1.RoleRef{
APIGroup: rbacv1.GroupName,
Kind: "ClusterRole",
Name: "cluster-monitoring-view",
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: "default",
Namespace: ns,
},
},
}, metav1.CreateOptions{})
}
func deleteClusterRoleBinding(oc *exutil.CLI, clusterRoleBindingName string) {
err := oc.AdminKubeClient().RbacV1().ClusterRoleBindings().Delete(context.Background(), clusterRoleBindingName, metav1.DeleteOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
}
func bindClusterRoleToUser(oc *exutil.CLI, clusterRoleName, userName, clusterRoleBindingName string) (*rbacv1.ClusterRoleBinding, error) {
return oc.AdminKubeClient().RbacV1().ClusterRoleBindings().Create(context.Background(), &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: clusterRoleBindingName,
},
RoleRef: rbacv1.RoleRef{
APIGroup: rbacv1.GroupName,
Kind: "ClusterRole",
Name: clusterRoleName,
},
Subjects: []rbacv1.Subject{
{
Kind: "User",
Name: userName,
},
},
}, metav1.CreateOptions{})
}
func checkRoute(oc *exutil.CLI, ns, name, token, queryString, metricString string, timeout time.Duration) {
var metrics string
err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, timeout*time.Second, false, func(context.Context) (bool, error) {
path, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("route", name, "-n", ns, "-o=jsonpath={.spec.path}").Output()
if err != nil {
return false, nil
}
host, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("route", name, "-n", ns, "-o=jsonpath={.spec.host}").Output()
if err != nil {
return false, nil
}
metricCMD := fmt.Sprintf("curl -G -s -k -H \"Authorization: Bearer %s\" https://%s%s --data-urlencode '%s'", token, host, path, queryString)
curlOutput, err := exec.Command("bash", "-c", metricCMD).Output()
if err != nil {
return false, nil
}
metrics = string(curlOutput)
if !strings.Contains(metrics, metricString) {
return false, nil
}
return true, err
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The metrics %s failed to contain %s", metrics, metricString))
}
// check thanos_ruler retention
func checkRetention(oc *exutil.CLI, ns string, sts string, expectedRetention string, timeout time.Duration) {
err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, timeout*time.Second, false, func(context.Context) (bool, error) {
stsObject, err := oc.AdminKubeClient().AppsV1().StatefulSets(ns).Get(context.Background(), sts, metav1.GetOptions{})
if err != nil {
return false, nil
}
args := stsObject.Spec.Template.Spec.Containers[0].Args
for _, v := range args {
if strings.Contains(v, expectedRetention) {
return true, nil
}
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("the retention of %s is not expected %s", sts, expectedRetention))
}
func deleteConfig(oc *exutil.CLI, configName, ns string) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("ConfigMap", configName, "-n", ns, "--ignore-not-found").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
// patch&check enforcedBodySizeLimit value in cluster-monitoring-config
func patchAndCheckBodySizeLimit(oc *exutil.CLI, limitValue string, checkValue string) {
patchLimit := oc.AsAdmin().WithoutNamespace().Run("patch").Args("cm", "cluster-monitoring-config", "-p", `{"data": {"config.yaml": "prometheusK8s:\n enforcedBodySizeLimit: `+limitValue+`"}}`, "--type=merge", "-n", "openshift-monitoring").Execute()
o.Expect(patchLimit).NotTo(o.HaveOccurred())
e2e.Logf("failed to patch enforcedBodySizeLimit value: %v", limitValue)
checkLimit := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 180*time.Second, false, func(context.Context) (bool, error) {
limit, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", "openshift-monitoring", "-c", "prometheus", "prometheus-k8s-0", "--", "bash", "-c", "cat /etc/prometheus/config_out/prometheus.env.yaml | grep body_size_limit | uniq").Output()
if err != nil || !strings.Contains(limit, checkValue) {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(checkLimit, "failed to check limit")
}
// check remote write config in the pod
func checkRmtWrtConfig(oc *exutil.CLI, ns string, podName string, checkValue string) {
envCheck := wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 360*time.Second, false, func(context.Context) (bool, error) {
envOutput, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", ns, "-c", "prometheus", podName, "--", "bash", "-c", fmt.Sprintf(`cat "/etc/prometheus/config_out/prometheus.env.yaml" | grep '%s'`, checkValue)).Output()
if err != nil || !strings.Contains(envOutput, checkValue) {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(envCheck, "failed to check remote write config")
}
// check that alerts or metrics do not exist; for metrics, the `checkMetric` util is preferred
func checkAlertNotExist(oc *exutil.CLI, url, token, alertName string, timeout time.Duration) {
cmd := "curl -G -k -s -H \"Authorization:Bearer " + token + "\" " + url
err := wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, timeout*time.Second, false, func(context.Context) (bool, error) {
chk, err := exutil.RemoteShPod(oc, "openshift-monitoring", "prometheus-k8s-0", "sh", "-c", cmd)
o.Expect(err).NotTo(o.HaveOccurred())
if err != nil || strings.Contains(chk, alertName) {
return false, nil
}
return true, err
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Target alert found: %s", alertName))
}
// check alertmanager config in the pod
func checkAlertmanagerConfig(oc *exutil.CLI, ns string, podName string, checkValue string, expectExist bool) {
envCheck := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 180*time.Second, false, func(context.Context) (bool, error) {
envOutput, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", ns, "-c", "alertmanager", podName, "--", "bash", "-c", fmt.Sprintf(`cat /etc/alertmanager/config_out/alertmanager.env.yaml | grep '%s'`, checkValue)).Output()
if expectExist {
if err != nil || !strings.Contains(envOutput, checkValue) {
return false, nil
}
return true, nil
}
if !expectExist {
if !strings.Contains(envOutput, checkValue) {
return true, nil
}
return false, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(envCheck, "failed to check alertmanager config")
}
// check prometheus config in the pod
func checkPrometheusConfig(oc *exutil.CLI, ns string, podName string, checkValue string, expectExist bool) {
envCheck := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 300*time.Second, false, func(context.Context) (bool, error) {
envOutput, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", ns, "-c", "prometheus", podName, "--", "bash", "-c", fmt.Sprintf(`cat /etc/prometheus/config_out/prometheus.env.yaml | grep '%s'`, checkValue)).Output()
if expectExist {
if err != nil || !strings.Contains(envOutput, checkValue) {
return false, nil
}
return true, nil
}
if !expectExist {
if err != nil || !strings.Contains(envOutput, checkValue) {
return true, nil
}
return false, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(envCheck, "failed to check prometheus config")
}
// check configuration in a specific container of the pod within the given time
func checkConfigInPod(oc *exutil.CLI, namespace string, podName string, containerName string, cmd string, checkValue string) {
podCheck := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 240*time.Second, false, func(context.Context) (bool, error) {
Output, err := exutil.RemoteShPodWithBashSpecifyContainer(oc, namespace, podName, containerName, cmd)
if err != nil || !strings.Contains(Output, checkValue) {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(podCheck, "failed to check configuration in the pod")
}
// check specific pod logs in container
func checkLogsInContainer(oc *exutil.CLI, namespace string, podName string, containerName string, checkValue string) {
err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 240*time.Second, false, func(context.Context) (bool, error) {
Output, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", namespace, podName, "-c", containerName).Output()
if err != nil || !strings.Contains(Output, checkValue) {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("failed to find \"%s\" in the pod logs", checkValue))
}
// get specific pod name with label then describe pod info
func getSpecPodInfo(oc *exutil.CLI, ns string, label string, checkValue string) {
envCheck := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 180*time.Second, false, func(context.Context) (bool, error) {
podName, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns, "-l", label, "-ojsonpath={.items[].metadata.name}").Output()
output, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args("pod", podName, "-n", ns).Output()
if err != nil || !strings.Contains(output, checkValue) {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(envCheck, fmt.Sprintf("failed to find \"%s\" in the pod yaml", checkValue))
}
// check that pods with the given label are fully deleted
func checkPodDeleted(oc *exutil.CLI, ns string, label string, checkValue string) {
podCheck := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 240*time.Second, false, func(context.Context) (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns, "-l", label).Output()
if err != nil || strings.Contains(output, checkValue) {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(podCheck, fmt.Sprintf("found \"%s\" exist or not fully deleted", checkValue))
}
// query monitoring metrics, alerts from a specific pod
func queryFromPod(oc *exutil.CLI, url, token, ns, pod, container, metricString string, timeout time.Duration) {
var metrics string
var err error
getCmd := "curl -G -k -s -H \"Authorization:Bearer " + token + "\" " + url
err = wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, timeout*time.Second, false, func(context.Context) (bool, error) {
metrics, err = oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", ns, "-c", container, pod, "--", "bash", "-c", getCmd).Output()
if err != nil || !strings.Contains(metrics, metricString) {
return false, nil
}
return true, err
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The metrics %s failed to contain %s", metrics, metricString))
}
// check whether a config value exists or is absent in the yaml/json output
func checkYamlconfig(oc *exutil.CLI, ns string, components string, componentsName string, cmd string, checkValue string, expectExist bool) {
configCheck := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 240*time.Second, false, func(context.Context) (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(components, componentsName, cmd, "-n", ns).Output()
if expectExist {
if err != nil || !strings.Contains(output, checkValue) {
e2e.Logf("output: \n%v", output)
return false, nil
}
return true, nil
}
if !expectExist {
if err != nil || !strings.Contains(output, checkValue) {
return true, nil
}
return false, nil
}
e2e.Logf("output: \n%v", output)
return false, nil
})
exutil.AssertWaitPollNoErr(configCheck, fmt.Sprintf("base on `expectExist=%v`, did (not) find \"%s\" exist", expectExist, checkValue))
}
// check logs through label
func checkLogWithLabel(oc *exutil.CLI, namespace string, label string, containerName string, checkValue string, expectExist bool) {
err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 240*time.Second, false, func(context.Context) (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", namespace, "-l", label, "-c", containerName, "--tail=-1").Output()
if expectExist {
if err != nil || !strings.Contains(output, checkValue) {
return false, nil
}
return true, nil
}
if !expectExist {
if err != nil || !strings.Contains(output, checkValue) {
return true, nil
}
return false, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("failed to find \"%s\" in the pod logs", checkValue))
}
// assertPodToBeReady polls the pod status until it is ready; the check is skipped when the pod does not exist
func assertPodToBeReady(oc *exutil.CLI, podName string, namespace string) {
err := wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 3*time.Minute, false, func(context.Context) (bool, error) {
stdout, err := oc.AsAdmin().Run("get").Args("pod", podName, "-n", namespace, "--ignore-not-found", "-o", "jsonpath='{.status.conditions[?(@.type==\"Ready\")].status}'").Output()
if err != nil {
e2e.Logf("the err:%v, and try next round", err)
return false, nil
}
if strings.Contains(stdout, "True") {
e2e.Logf("Pod %s is ready!", podName)
return true, nil
}
if stdout == "" {
e2e.Logf("ignore check, Pod %s is not found", podName)
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Pod %s status is not ready!", podName))
}
// use exec command to check configs/files inside the pod
func checkConfigInsidePod(oc *exutil.CLI, ns string, container string, pod string, cmd string, checkValue string, expectExist bool) {
configCheck := wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 360*time.Second, false, func(context.Context) (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", ns, "-c", container, pod, "--", "bash", "-c", cmd).Output()
if expectExist {
if err != nil || !strings.Contains(output, checkValue) {
return false, nil
}
return true, nil
}
if !expectExist {
if err != nil || !strings.Contains(output, checkValue) {
return true, nil
}
return false, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(configCheck, fmt.Sprintf("base on `expectExist=%v`, did (not) find \"%s\" exist", expectExist, checkValue))
}
// ensures the pod remains in Ready state for a specific duration
func ensurePodRemainsReady(oc *exutil.CLI, podName string, namespace string, timeout time.Duration, interval time.Duration) {
endTime := time.Now().Add(timeout)
for time.Now().Before(endTime) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", podName, "-n", namespace).Output()
if err != nil || !strings.Contains(output, "Running") {
e2e.Logf("Pod %s is not in Running state, err: %v\n", podName, err)
} else {
e2e.Logf("Pod %s is Running and Ready\n", podName)
}
time.Sleep(interval)
}
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", podName, "-n", namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get the pod %s in namespace %s", podName, namespace)
o.Expect(strings.Contains(output, "Running")).To(o.BeTrue(), "Pod %s did not remain Ready within the given timeout", podName)
}
// getAllRunningPodsWithLabel returns the names of all running pods for a given namespace and label
func getAllRunningPodsWithLabel(oc *exutil.CLI, namespace string, label string) ([]string, error) {
pods, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", namespace, "-l", label, "--field-selector=status.phase=Running").Template("{{range .items}}{{.metadata.name}}{{\" \"}}{{end}}").Output()
if len(pods) == 0 {
return []string{}, err
}
return strings.Split(pods, " "), err
}
// alertmanagerTestPodCheck polls the alertmanager-test-alertmanager-0 pod until it is ready
func alertmanagerTestPodCheck(oc *exutil.CLI) {
err := wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 180*time.Second, false, func(context.Context) (bool, error) {
podStats, err := oc.AsAdmin().Run("get").Args("pod", "alertmanager-test-alertmanager-0", "-n", "openshift-user-workload-monitoring").Output()
if err != nil || strings.Contains(podStats, "not found") {
return false, nil
}
if err != nil || strings.Contains(podStats, "Init:0/1") {
return false, nil
}
if err != nil || strings.Contains(podStats, "ContainerCreating") {
return false, nil
}
e2e.Logf("pod is ready: \n%v", podStats)
return true, nil
})
exutil.AssertWaitPollNoErr(err, "pod not created")
}
// getReadyPodsWithLabels polls pods selected by the given label until they are ready
func getReadyPodsWithLabels(oc *exutil.CLI, ns string, label string) {
podCheck := wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 10*time.Minute, true, func(context.Context) (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns, "-l", label).Output()
if err != nil || strings.Contains(output, "Terminating") ||
strings.Contains(output, "ContainerCreating") ||
strings.Contains(output, "Pending") ||
strings.Contains(output, "ErrImagePull") ||
strings.Contains(output, "CrashLoopBackOff") ||
strings.Contains(output, "ImagePullBackOff") {
return false, nil
}
return true, nil
})
if podCheck != nil {
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns, "-l", label).Output()
e2e.Logf("pods not ready: \n%v", output)
}
exutil.AssertWaitPollNoErr(podCheck, "some pods are not ready!")
}
// getNodesWithLabel returns the names of all nodes matching the given label
func getNodesWithLabel(oc *exutil.CLI, label string) ([]string, error) {
nodes, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "-l", label, "-ojsonpath={.items[*].metadata.name}").Output()
if len(nodes) == 0 {
e2e.Logf("target node names: \n%v", nodes)
return []string{}, err
}
return strings.Split(nodes, " "), err
}
// isSNOEnvironment reports whether the environment is a single-node cluster
func isSNOEnvironment(oc *exutil.CLI) (bool, error) {
nodes, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-o=jsonpath={.items[*].metadata.name}").Output()
if err != nil {
return false, err
}
nodeList := strings.Split(nodes, " ")
if len(nodeList) <= 1 {
e2e.Logf("Detected SNO environment with %d node(s)", len(nodeList))
return true, nil
}
e2e.Logf("Detected multi-node environment with %d nodes", len(nodeList))
return false, nil
}
// checkPodDisruptionBudgetIfNotSNO checks the PodDisruptionBudget when the environment is not SNO
func checkPodDisruptionBudgetIfNotSNO(oc *exutil.CLI) {
isSNO, err := isSNOEnvironment(oc)
o.Expect(err).NotTo(o.HaveOccurred())
if isSNO {
exutil.By("Skipping PodDisruptionBudget check in SNO environment")
return
}
exutil.By("Waiting for PodDisruptionBudget to be available in multi-node environment")
err = wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 120*time.Second, false, func(context.Context) (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("PodDisruptionBudget", "monitoring-plugin", "-n", "openshift-monitoring").Output()
if err != nil {
return false, nil
}
if !strings.Contains(output, "not found") {
return true, nil
}
return false, nil
})
o.Expect(err).NotTo(o.HaveOccurred(), "PodDisruptionBudget monitoring-plugin was not found within the timeout period")
exutil.By("Checking PodDisruptionBudget after it is ready")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("PodDisruptionBudget", "monitoring-plugin", "-n", "openshift-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).NotTo(o.ContainSubstring("not found"))
}
func getDeploymentReplicas(oc *exutil.CLI, ns string, deployName string) (int, error) {
var expectedReplicas int
err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 1*time.Minute, true, func(ctx context.Context) (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment", deployName, "-n", ns, "-o", "jsonpath={.spec.replicas}").Output()
if err != nil {
e2e.Logf("Failed to get deployment %s: %v", deployName, err)
return false, nil
}
expectedReplicas, err = strconv.Atoi(output)
if err != nil {
e2e.Logf("Failed to parse replica count for deployment %s: %v", deployName, err)
return false, nil
}
if expectedReplicas >= 1 {
return true, nil
}
return false, nil
})
if err != nil {
return 0, fmt.Errorf("failed to get replica count for deployment %s: %v", deployName, err)
}
e2e.Logf("Deployment %s expects %d replicas", deployName, expectedReplicas)
return expectedReplicas, nil
}
// waitForPodsToMatchReplicas polls until the number of running pods matches the replica count expected by the Deployment
func waitForPodsToMatchReplicas(oc *exutil.CLI, namespace string, deployName string, label string) {
err := wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 10*time.Minute, true, func(ctx context.Context) (bool, error) {
expectedReplicas, err := getDeploymentReplicas(oc, namespace, deployName)
if err != nil {
e2e.Logf("Error getting expected replicas: %v", err)
return false, nil
}
runningPods, err := getAllRunningPodsWithLabel(oc, namespace, label)
if err != nil {
e2e.Logf("Error getting running pods: %v", err)
return false, nil
}
if len(runningPods) != expectedReplicas {
e2e.Logf("Mismatch: expected %d running pods, but found %d", expectedReplicas, len(runningPods))
return false, nil
}
e2e.Logf("Pods match expected replicas: %d/%d", len(runningPods), expectedReplicas)
return true, nil
})
exutil.AssertWaitPollNoErr(err, "Pods did not reach the expected number!")
}
|
package monitoring
| ||||
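Taken together, the helpers above share one pattern: poll an oc command until its output contains an expected substring, then fail with AssertWaitPollNoErr if the timeout is hit. The sketch below is a hypothetical spec showing how a few of them compose; it assumes the same monitoring package and Ginkgo/exutil imports used elsewhere in this repository, and the spec name, query, expected strings and timeout are illustrative only.
package monitoring

import (
	g "github.com/onsi/ginkgo/v2"
	exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)

// Hypothetical spec: illustrates how the utility helpers are typically combined in a test.
var _ = g.Describe("[sig-monitoring] example usage of the monitoring utils", func() {
	defer g.GinkgoRecover()
	oc := exutil.NewCLIForKubeOpenShift("mon-utils-" + getRandomString())

	g.It("queries a metric and inspects the rendered prometheus config", func() {
		// token of the in-cluster prometheus-k8s service account
		token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
		// poll thanos-querier until the result contains the expected job label
		checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=up{job="prometheus-k8s"}'`, token, `"job":"prometheus-k8s"`, 120)
		// poll the config rendered inside the prometheus pod for an expected key
		checkPrometheusConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "scrape_interval", true)
	})
})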
function
|
openshift/openshift-tests-private
|
f7c37760-c57b-423a-aa4b-7df5a2da1f7d
|
create
|
['"fmt"', '"strings"']
|
['monitoringConfig']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func (cm *monitoringConfig) create(oc *exutil.CLI) {
if !checkConfigMap(oc, "openshift-monitoring", "cluster-monitoring-config") {
e2e.Logf("Create configmap: cluster-monitoring-config")
output, err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", cm.template, "-p", "NAME="+cm.name, "NAMESPACE="+cm.namespace, "ENABLEUSERWORKLOAD="+fmt.Sprintf("%v", cm.enableUserWorkload))
if err != nil {
if strings.Contains(output, "AlreadyExists") {
err = nil
}
}
o.Expect(err).NotTo(o.HaveOccurred())
}
}
|
monitoring
| |||
function
|
openshift/openshift-tests-private
|
9c393cb7-4b10-41a2-9f60-73044ffe3a45
|
createUWMConfig
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func createUWMConfig(oc *exutil.CLI, uwmMonitoringConfig string) {
if !checkConfigMap(oc, "openshift-user-workload-monitoring", "user-workload-monitoring-config") {
e2e.Logf("Create configmap: user-workload-monitoring-config")
output, err := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", uwmMonitoringConfig).Output()
if err != nil {
if strings.Contains(output, "AlreadyExists") {
err = nil
}
}
o.Expect(err).NotTo(o.HaveOccurred())
}
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
23654edf-c825-477c-86aa-bbc1aaf360e7
|
checkConfigMap
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func checkConfigMap(oc *exutil.CLI, ns, configmapName string) bool {
searchOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm", configmapName, "-n", ns, "-o=jsonpath={.data.config\\.yaml}").Output()
if err != nil {
return false
}
if strings.Contains(searchOutput, "retention") {
return true
}
return false
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
f51ad653-4a75-4019-8a19-46b35273a0d8
|
getRandomString
|
['"math/rand"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func getRandomString() string {
chars := "abcdefghijklmnopqrstuvwxyz0123456789"
seed := rand.New(rand.NewSource(time.Now().UnixNano()))
buffer := make([]byte, 8)
for index := range buffer {
buffer[index] = chars[seed.Intn(len(chars))]
}
return string(buffer)
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
ea715604-3094-4336-bddd-398f8097dc5c
|
applyResourceFromTemplate
|
['"context"', '"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func applyResourceFromTemplate(oc *exutil.CLI, parameters ...string) (string, error) {
var configFile string
err := wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, 15*time.Second, false, func(context.Context) (bool, error) {
output, err := oc.AsAdmin().Run("process").Args(parameters...).OutputToFile(getRandomString() + "cluster-monitoring.json")
if err != nil {
return false, nil
}
configFile = output
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to process %v", parameters))
return oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", configFile).Output()
}
|
monitoring
| ||||
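A hedged sketch of calling applyResourceFromTemplate directly from a spec body; the fixture path, template name and parameters below are hypothetical and exist only to show the argument shape.
// assumes an initialized `oc` client inside a spec, plus the path/filepath import;
// the template file and parameter names are examples only
templateDir := exutil.FixturePath("testdata", "monitoring")
output, err := applyResourceFromTemplate(oc,
	"--ignore-unknown-parameters=true",
	"-f", filepath.Join(templateDir, "example-config.yaml"),
	"-p", "NAME=example-config", "NAMESPACE=openshift-monitoring")
if err != nil {
	e2e.Logf("apply failed: %v, output: %s", err, output)
}
o.Expect(err).NotTo(o.HaveOccurred())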
function
|
openshift/openshift-tests-private
|
cf728a62-645a-4df4-a205-c392b2b5a9e7
|
labelNameSpace
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func labelNameSpace(oc *exutil.CLI, namespace string, label string) {
err := oc.AsAdmin().WithoutNamespace().Run("label").Args("namespace", namespace, label, "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The namespace %s is labeled by %q", namespace, label)
}
|
monitoring
| |||||
function
|
openshift/openshift-tests-private
|
92f06b09-6415-43af-8a68-603f8dad4bbb
|
getSAToken
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func getSAToken(oc *exutil.CLI, account, ns string) string {
e2e.Logf("Getting a token assigned to specific serviceaccount from %s namespace...", ns)
token, err := oc.AsAdmin().WithoutNamespace().Run("create").Args("token", account, "-n", ns).Output()
if err != nil {
if strings.Contains(token, "unknown command") {
token, err = oc.AsAdmin().WithoutNamespace().Run("sa").Args("get-token", account, "-n", ns).Output()
}
}
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(token).NotTo(o.BeEmpty())
return token
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
bbff354c-ab98-445c-9d72-3feeb67ef4e7
|
checkMetric
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func checkMetric(oc *exutil.CLI, url, token, metricString string, timeout time.Duration) {
var metrics string
var err error
getCmd := "curl -G -k -s -H \"Authorization:Bearer " + token + "\" " + url
err = wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, timeout*time.Second, false, func(context.Context) (bool, error) {
metrics, err = exutil.RemoteShPod(oc, "openshift-monitoring", "prometheus-k8s-0", "sh", "-c", getCmd)
if err != nil || !strings.Contains(metrics, metricString) {
return false, nil
}
return true, err
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The metrics %s failed to contain %s", metrics, metricString))
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
6e82b1f5-674b-45f3-b313-2755713fdc9e
|
createResourceFromYaml
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func createResourceFromYaml(oc *exutil.CLI, ns, yamlFile string) {
err := oc.AsAdmin().Run("apply").Args("-n", ns, "-f", yamlFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
|
monitoring
| |||||
function
|
openshift/openshift-tests-private
|
82d1b9b2-d001-44fd-aee9-c840cb07cc59
|
deleteBindMonitoringViewRoleToDefaultSA
|
['"context"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func deleteBindMonitoringViewRoleToDefaultSA(oc *exutil.CLI, uwmFederateRBACViewName string) {
err := oc.AdminKubeClient().RbacV1().ClusterRoleBindings().Delete(context.Background(), uwmFederateRBACViewName, metav1.DeleteOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
30b666ee-3a4c-46bd-bf84-7475a863e667
|
bindMonitoringViewRoleToDefaultSA
|
['"context"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func bindMonitoringViewRoleToDefaultSA(oc *exutil.CLI, ns, uwmFederateRBACViewName string) (*rbacv1.ClusterRoleBinding, error) {
return oc.AdminKubeClient().RbacV1().ClusterRoleBindings().Create(context.Background(), &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: uwmFederateRBACViewName,
},
RoleRef: rbacv1.RoleRef{
APIGroup: rbacv1.GroupName,
Kind: "ClusterRole",
Name: "cluster-monitoring-view",
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: "default",
Namespace: ns,
},
},
}, metav1.CreateOptions{})
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
d68135d5-aad7-4e89-9d27-d371b13d3bf3
|
deleteClusterRoleBinding
|
['"context"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func deleteClusterRoleBinding(oc *exutil.CLI, clusterRoleBindingName string) {
err := oc.AdminKubeClient().RbacV1().ClusterRoleBindings().Delete(context.Background(), clusterRoleBindingName, metav1.DeleteOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
fee37c87-550a-4bec-9814-32e28b55223f
|
bindClusterRoleToUser
|
['"context"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func bindClusterRoleToUser(oc *exutil.CLI, clusterRoleName, userName, clusterRoleBindingName string) (*rbacv1.ClusterRoleBinding, error) {
return oc.AdminKubeClient().RbacV1().ClusterRoleBindings().Create(context.Background(), &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: clusterRoleBindingName,
},
RoleRef: rbacv1.RoleRef{
APIGroup: rbacv1.GroupName,
Kind: "ClusterRole",
Name: clusterRoleName,
},
Subjects: []rbacv1.Subject{
{
Kind: "User",
Name: userName,
},
},
}, metav1.CreateOptions{})
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
b3367309-a194-430f-a101-d8f51e45a013
|
checkRoute
|
['"context"', '"fmt"', '"os/exec"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func checkRoute(oc *exutil.CLI, ns, name, token, queryString, metricString string, timeout time.Duration) {
var metrics string
err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, timeout*time.Second, false, func(context.Context) (bool, error) {
path, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("route", name, "-n", ns, "-o=jsonpath={.spec.path}").Output()
if err != nil {
return false, nil
}
host, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("route", name, "-n", ns, "-o=jsonpath={.spec.host}").Output()
if err != nil {
return false, nil
}
metricCMD := fmt.Sprintf("curl -G -s -k -H \"Authorization: Bearer %s\" https://%s%s --data-urlencode '%s'", token, host, path, queryString)
curlOutput, err := exec.Command("bash", "-c", metricCMD).Output()
if err != nil {
return false, nil
}
metrics = string(curlOutput)
if !strings.Contains(metrics, metricString) {
return false, nil
}
return true, err
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The metrics %s failed to contain %s", metrics, metricString))
}
|
monitoring
| ||||
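checkRoute builds the request URL from the route's spec.host and spec.path, so it is most useful for routes that carry a path such as a federate endpoint. A hedged example follows; the route name, match selector and expected substring are assumptions for illustration.
// assumes `oc` is initialized inside a spec
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
// poll the federate route (illustrative) until the response mentions the requested series
checkRoute(oc, "openshift-monitoring", "prometheus-k8s-federate", token,
	`match[]={__name__="cluster_version"}`, "cluster_version", 120)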
function
|
openshift/openshift-tests-private
|
f27faefe-6dd9-4adc-8404-5c72b30cfdd2
|
checkRetention
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func checkRetention(oc *exutil.CLI, ns string, sts string, expectedRetention string, timeout time.Duration) {
err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, timeout*time.Second, false, func(context.Context) (bool, error) {
stsObject, err := oc.AdminKubeClient().AppsV1().StatefulSets(ns).Get(context.Background(), sts, metav1.GetOptions{})
if err != nil {
return false, nil
}
args := stsObject.Spec.Template.Spec.Containers[0].Args
for _, v := range args {
if strings.Contains(v, expectedRetention) {
return true, nil
}
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("the retention of %s is not expected %s", sts, expectedRetention))
}
|
monitoring
| ||||
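A hedged usage sketch for checkRetention; it inspects only the first container's args of the StatefulSet, and the retention value below assumes the default configured in cluster-monitoring-config.
// wait up to 180s for the prometheus-k8s StatefulSet args to contain the expected retention
checkRetention(oc, "openshift-monitoring", "prometheus-k8s", "retention.time=15d", 180)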
function
|
openshift/openshift-tests-private
|
00842e01-e764-4939-8b4a-891a77c06b8e
|
deleteConfig
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func deleteConfig(oc *exutil.CLI, configName, ns string) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("ConfigMap", configName, "-n", ns, "--ignore-not-found").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
|
monitoring
| |||||
function
|
openshift/openshift-tests-private
|
c1b421df-c71c-42ff-b192-712e8668ad95
|
patchAndCheckBodySizeLimit
|
['"context"', '"os/exec"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func patchAndCheckBodySizeLimit(oc *exutil.CLI, limitValue string, checkValue string) {
patchLimit := oc.AsAdmin().WithoutNamespace().Run("patch").Args("cm", "cluster-monitoring-config", "-p", `{"data": {"config.yaml": "prometheusK8s:\n enforcedBodySizeLimit: `+limitValue+`"}}`, "--type=merge", "-n", "openshift-monitoring").Execute()
o.Expect(patchLimit).NotTo(o.HaveOccurred())
e2e.Logf("failed to patch enforcedBodySizeLimit value: %v", limitValue)
checkLimit := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 180*time.Second, false, func(context.Context) (bool, error) {
limit, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", "openshift-monitoring", "-c", "prometheus", "prometheus-k8s-0", "--", "bash", "-c", "cat /etc/prometheus/config_out/prometheus.env.yaml | grep body_size_limit | uniq").Output()
if err != nil || !strings.Contains(limit, checkValue) {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(checkLimit, "failed to check limit")
}
|
monitoring
| ||||
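A hedged usage sketch; the 10MB value is arbitrary and the check string mirrors how the limit is rendered into prometheus.env.yaml.
// patch enforcedBodySizeLimit and wait until prometheus-k8s-0 renders the new value
patchAndCheckBodySizeLimit(oc, "10MB", "body_size_limit: 10MB")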
function
|
openshift/openshift-tests-private
|
52e8e9ed-fdd4-454f-aec5-552dcc65380b
|
checkRmtWrtConfig
|
['"context"', '"fmt"', '"os/exec"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func checkRmtWrtConfig(oc *exutil.CLI, ns string, podName string, checkValue string) {
envCheck := wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 360*time.Second, false, func(context.Context) (bool, error) {
envOutput, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", ns, "-c", "prometheus", podName, "--", "bash", "-c", fmt.Sprintf(`cat "/etc/prometheus/config_out/prometheus.env.yaml" | grep '%s'`, checkValue)).Output()
if err != nil || !strings.Contains(envOutput, checkValue) {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(envCheck, "failed to check remote write config")
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
a7d0a981-7b0e-4906-bca6-c82688bb4cae
|
checkAlertNotExist
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func checkAlertNotExist(oc *exutil.CLI, url, token, alertName string, timeout time.Duration) {
cmd := "curl -G -k -s -H \"Authorization:Bearer " + token + "\" " + url
err := wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, timeout*time.Second, false, func(context.Context) (bool, error) {
chk, err := exutil.RemoteShPod(oc, "openshift-monitoring", "prometheus-k8s-0", "sh", "-c", cmd)
o.Expect(err).NotTo(o.HaveOccurred())
if err != nil || strings.Contains(chk, alertName) {
return false, nil
}
return true, err
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Target alert found: %s", alertName))
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
a2ae5df9-6021-4aa5-baa4-9eba1e5c0a86
|
checkAlertmanagerConfig
|
['"context"', '"fmt"', '"os/exec"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func checkAlertmanagerConfig(oc *exutil.CLI, ns string, podName string, checkValue string, expectExist bool) {
envCheck := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 180*time.Second, false, func(context.Context) (bool, error) {
envOutput, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", ns, "-c", "alertmanager", podName, "--", "bash", "-c", fmt.Sprintf(`cat /etc/alertmanager/config_out/alertmanager.env.yaml | grep '%s'`, checkValue)).Output()
if expectExist {
if err != nil || !strings.Contains(envOutput, checkValue) {
return false, nil
}
return true, nil
}
if !expectExist {
if !strings.Contains(envOutput, checkValue) {
return true, nil
}
return false, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(envCheck, "failed to check alertmanager config")
}
|
monitoring
| ||||
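A hedged usage sketch for checkAlertmanagerConfig; the pod name follows the alertmanager-main StatefulSet naming and the checked keys are examples.
// expect a key to be present in the rendered alertmanager config
checkAlertmanagerConfig(oc, "openshift-monitoring", "alertmanager-main-0", "group_by:", true)
// and expect a removed receiver (hypothetical name) to be absent
checkAlertmanagerConfig(oc, "openshift-monitoring", "alertmanager-main-0", "receiver: example-webhook", false)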
function
|
openshift/openshift-tests-private
|
c3dd3200-bff8-4d6a-a91e-940dbc28b20c
|
checkPrometheusConfig
|
['"context"', '"fmt"', '"os/exec"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func checkPrometheusConfig(oc *exutil.CLI, ns string, podName string, checkValue string, expectExist bool) {
envCheck := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 300*time.Second, false, func(context.Context) (bool, error) {
envOutput, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", ns, "-c", "prometheus", podName, "--", "bash", "-c", fmt.Sprintf(`cat /etc/prometheus/config_out/prometheus.env.yaml | grep '%s'`, checkValue)).Output()
if expectExist {
if err != nil || !strings.Contains(envOutput, checkValue) {
return false, nil
}
return true, nil
}
if !expectExist {
if err != nil || !strings.Contains(envOutput, checkValue) {
return true, nil
}
return false, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(envCheck, "failed to check prometheus config")
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
30cca3f8-9b42-4ce3-901a-743eb195703b
|
checkConfigInPod
|
['"context"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func checkConfigInPod(oc *exutil.CLI, namespace string, podName string, containerName string, cmd string, checkValue string) {
podCheck := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 240*time.Second, false, func(context.Context) (bool, error) {
Output, err := exutil.RemoteShPodWithBashSpecifyContainer(oc, namespace, podName, containerName, cmd)
if err != nil || !strings.Contains(Output, checkValue) {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(podCheck, "failed to check configuration in the pod")
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
a7eb0669-29ef-4515-9282-41c7409a650f
|
checkLogsInContainer
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func checkLogsInContainer(oc *exutil.CLI, namespace string, podName string, containerName string, checkValue string) {
err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 240*time.Second, false, func(context.Context) (bool, error) {
Output, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", namespace, podName, "-c", containerName).Output()
if err != nil || !strings.Contains(Output, checkValue) {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("failed to find \"%s\" in the pod logs", checkValue))
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
32fa0d1e-1e22-4d89-b6dd-36317d720b82
|
getSpecPodInfo
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func getSpecPodInfo(oc *exutil.CLI, ns string, label string, checkValue string) {
envCheck := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 180*time.Second, false, func(context.Context) (bool, error) {
podName, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns, "-l", label, "-ojsonpath={.items[].metadata.name}").Output()
output, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args("pod", podName, "-n", ns).Output()
if err != nil || !strings.Contains(output, checkValue) {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(envCheck, fmt.Sprintf("failed to find \"%s\" in the pod yaml", checkValue))
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
0cac2c22-0363-4b99-af9d-460a3cb13504
|
checkPodDeleted
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func checkPodDeleted(oc *exutil.CLI, ns string, label string, checkValue string) {
podCheck := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 240*time.Second, false, func(context.Context) (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns, "-l", label).Output()
if err != nil || strings.Contains(output, checkValue) {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(podCheck, fmt.Sprintf("found \"%s\" exist or not fully deleted", checkValue))
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
3ce43692-cfa8-467a-80ba-d8680f814a58
|
queryFromPod
|
['"context"', '"fmt"', '"os/exec"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func queryFromPod(oc *exutil.CLI, url, token, ns, pod, container, metricString string, timeout time.Duration) {
var metrics string
var err error
getCmd := "curl -G -k -s -H \"Authorization:Bearer " + token + "\" " + url
err = wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, timeout*time.Second, false, func(context.Context) (bool, error) {
metrics, err = oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", ns, "-c", container, pod, "--", "bash", "-c", getCmd).Output()
if err != nil || !strings.Contains(metrics, metricString) {
return false, nil
}
return true, err
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The metrics %s failed to contain %s", metrics, metricString))
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
1a9945bc-43b7-4b24-80a5-5c104a713c29
|
checkYamlconfig
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func checkYamlconfig(oc *exutil.CLI, ns string, components string, componentsName string, cmd string, checkValue string, expectExist bool) {
configCheck := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 240*time.Second, false, func(context.Context) (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(components, componentsName, cmd, "-n", ns).Output()
if expectExist {
if err != nil || !strings.Contains(output, checkValue) {
e2e.Logf("output: \n%v", output)
return false, nil
}
return true, nil
}
if !expectExist {
if err != nil || !strings.Contains(output, checkValue) {
return true, nil
}
return false, nil
}
e2e.Logf("output: \n%v", output)
return false, nil
})
exutil.AssertWaitPollNoErr(configCheck, fmt.Sprintf("base on `expectExist=%v`, did (not) find \"%s\" exist", expectExist, checkValue))
}
|
monitoring
| ||||
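checkYamlconfig is a generic wrapper around oc get <kind> <name> <jsonpath-arg> -n <ns>; the hedged example below checks that the cluster-monitoring-config ConfigMap mentions a key, and the checked substring is illustrative.
// assert that the config.yaml data of cluster-monitoring-config contains "prometheusK8s"
checkYamlconfig(oc, "openshift-monitoring", "configmap", "cluster-monitoring-config",
	`-ojsonpath={.data.config\.yaml}`, "prometheusK8s", true)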
function
|
openshift/openshift-tests-private
|
6e84abf2-f474-4303-a9b2-22fd93cba56a
|
checkLogWithLabel
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func checkLogWithLabel(oc *exutil.CLI, namespace string, label string, containerName string, checkValue string, expectExist bool) {
err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 240*time.Second, false, func(context.Context) (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", namespace, "-l", label, "-c", containerName, "--tail=-1").Output()
if expectExist {
if err != nil || !strings.Contains(output, checkValue) {
return false, nil
}
return true, nil
}
if !expectExist {
if err != nil || !strings.Contains(output, checkValue) {
return true, nil
}
return false, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("failed to find \"%s\" in the pod logs", checkValue))
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
1f5d09b6-cab9-4c14-8f77-510937415e3f
|
assertPodToBeReady
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func assertPodToBeReady(oc *exutil.CLI, podName string, namespace string) {
err := wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 3*time.Minute, false, func(context.Context) (bool, error) {
stdout, err := oc.AsAdmin().Run("get").Args("pod", podName, "-n", namespace, "--ignore-not-found", "-o", "jsonpath='{.status.conditions[?(@.type==\"Ready\")].status}'").Output()
if err != nil {
e2e.Logf("the err:%v, and try next round", err)
return false, nil
}
if strings.Contains(stdout, "True") {
e2e.Logf("Pod %s is ready!", podName)
return true, nil
}
if stdout == "" {
e2e.Logf("ignore check, Pod %s is not found", podName)
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Pod %s status is not ready!", podName))
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
620de830-d99f-44fb-a4a3-215c322302b1
|
checkConfigInsidePod
|
['"context"', '"fmt"', '"os/exec"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func checkConfigInsidePod(oc *exutil.CLI, ns string, container string, pod string, cmd string, checkValue string, expectExist bool) {
configCheck := wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 360*time.Second, false, func(context.Context) (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", ns, "-c", container, pod, "--", "bash", "-c", cmd).Output()
if expectExist {
if err != nil || !strings.Contains(output, checkValue) {
return false, nil
}
return true, nil
}
if !expectExist {
if err != nil || !strings.Contains(output, checkValue) {
return true, nil
}
return false, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(configCheck, fmt.Sprintf("base on `expectExist=%v`, did (not) find \"%s\" exist", expectExist, checkValue))
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
a3833d62-4770-470b-ba74-c0174d896f56
|
ensurePodRemainsReady
|
['"strings"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func ensurePodRemainsReady(oc *exutil.CLI, podName string, namespace string, timeout time.Duration, interval time.Duration) {
endTime := time.Now().Add(timeout)
for time.Now().Before(endTime) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", podName, "-n", namespace).Output()
if err != nil || !strings.Contains(output, "Running") {
e2e.Logf("Pod %s is not in Running state, err: %v\n", podName, err)
} else {
e2e.Logf("Pod %s is Running and Ready\n", podName)
}
time.Sleep(interval)
}
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", podName, "-n", namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get the pod %s in namespace %s", podName, namespace)
o.Expect(strings.Contains(output, "Running")).To(o.BeTrue(), "Pod %s did not remain Ready within the given timeout", podName)
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
2be74481-3a4a-47bf-b289-964975bbe09a
|
getAllRunningPodsWithLabel
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func getAllRunningPodsWithLabel(oc *exutil.CLI, namespace string, label string) ([]string, error) {
pods, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", namespace, "-l", label, "--field-selector=status.phase=Running").Template("{{range .items}}{{.metadata.name}}{{\" \"}}{{end}}").Output()
if len(pods) == 0 {
return []string{}, err
}
return strings.Split(pods, " "), err
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
babe44db-9638-4457-a68d-0100b8a9748b
|
alertmanagerTestPodCheck
|
['"context"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func alertmanagerTestPodCheck(oc *exutil.CLI) {
err := wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 180*time.Second, false, func(context.Context) (bool, error) {
podStats, err := oc.AsAdmin().Run("get").Args("pod", "alertmanager-test-alertmanager-0", "-n", "openshift-user-workload-monitoring").Output()
if err != nil || strings.Contains(podStats, "not found") {
return false, nil
}
if err != nil || strings.Contains(podStats, "Init:0/1") {
return false, nil
}
if err != nil || strings.Contains(podStats, "ContainerCreating") {
return false, nil
}
e2e.Logf("pod is ready: \n%v", podStats)
return true, nil
})
exutil.AssertWaitPollNoErr(err, "pod not created")
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
c0819725-4437-4bf8-8611-b06840c3c362
|
getReadyPodsWithLabels
|
['"context"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func getReadyPodsWithLabels(oc *exutil.CLI, ns string, label string) {
podCheck := wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 10*time.Minute, true, func(context.Context) (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns, "-l", label).Output()
if err != nil || strings.Contains(output, "Terminating") ||
strings.Contains(output, "ContainerCreating") ||
strings.Contains(output, "Pending") ||
strings.Contains(output, "ErrImagePull") ||
strings.Contains(output, "CrashLoopBackOff") ||
strings.Contains(output, "ImagePullBackOff") {
return false, nil
}
return true, nil
})
if podCheck != nil {
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns, "-l", label).Output()
e2e.Logf("pods not ready: \n%v", output)
}
exutil.AssertWaitPollNoErr(podCheck, "some pods are not ready!")
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
0ea7b5e7-086b-4af6-b3ee-236b451f0945
|
getNodesWithLabel
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func getNodesWithLabel(oc *exutil.CLI, label string) ([]string, error) {
nodes, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "-l", label, "-ojsonpath={.items[*].metadata.name}").Output()
if len(nodes) == 0 {
e2e.Logf("target node names: \n%v", nodes)
return []string{}, err
}
return strings.Split(nodes, " "), err
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
fdf3fa35-0e67-4671-965c-241e1c3716d3
|
isSNOEnvironment
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func isSNOEnvironment(oc *exutil.CLI) (bool, error) {
nodes, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-o=jsonpath={.items[*].metadata.name}").Output()
if err != nil {
return false, err
}
nodeList := strings.Split(nodes, " ")
if len(nodeList) <= 1 {
e2e.Logf("Detected SNO environment with %d node(s)", len(nodeList))
return true, nil
}
e2e.Logf("Detected multi-node environment with %d nodes", len(nodeList))
return false, nil
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
e68e209e-592d-44d3-a959-0d6e2c4c46bd
|
checkPodDisruptionBudgetIfNotSNO
|
['"context"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func checkPodDisruptionBudgetIfNotSNO(oc *exutil.CLI) {
isSNO, err := isSNOEnvironment(oc)
o.Expect(err).NotTo(o.HaveOccurred())
if isSNO {
exutil.By("Skipping PodDisruptionBudget check in SNO environment")
return
}
exutil.By("Waiting for PodDisruptionBudget to be available in multi-node environment")
err = wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 120*time.Second, false, func(context.Context) (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("PodDisruptionBudget", "monitoring-plugin", "-n", "openshift-monitoring").Output()
if err != nil {
return false, nil
}
if !strings.Contains(output, "not found") {
return true, nil
}
return false, nil
})
o.Expect(err).NotTo(o.HaveOccurred(), "PodDisruptionBudget monitoring-plugin was not found within the timeout period")
exutil.By("Checking PodDisruptionBudget after it is ready")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("PodDisruptionBudget", "monitoring-plugin", "-n", "openshift-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).NotTo(o.ContainSubstring("not found"))
}
|
monitoring
| ||||
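Both SNO helpers are meant to be called from specs whose expectations differ on single-node clusters; a hedged sketch of the two call patterns follows.
// skip the PDB assertion on SNO, otherwise wait for the monitoring-plugin PodDisruptionBudget
checkPodDisruptionBudgetIfNotSNO(oc)

// or branch manually when a spec needs its own single-node handling
if isSNO, err := isSNOEnvironment(oc); err == nil && isSNO {
	g.Skip("skipping HA-only checks on a single-node cluster")
}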
function
|
openshift/openshift-tests-private
|
85c7668b-eece-433a-8431-ad72383907e8
|
getDeploymentReplicas
|
['"context"', '"fmt"', '"strconv"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func getDeploymentReplicas(oc *exutil.CLI, ns string, deployName string) (int, error) {
var expectedReplicas int
err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 1*time.Minute, true, func(ctx context.Context) (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment", deployName, "-n", ns, "-o", "jsonpath={.spec.replicas}").Output()
if err != nil {
e2e.Logf("Failed to get deployment %s: %v", deployName, err)
return false, nil
}
expectedReplicas, err = strconv.Atoi(output)
if err != nil {
e2e.Logf("Failed to parse replica count for deployment %s: %v", deployName, err)
return false, nil
}
if expectedReplicas >= 1 {
return true, nil
}
return false, nil
})
if err != nil {
return 0, fmt.Errorf("failed to get replica count for deployment %s: %v", deployName, err)
}
e2e.Logf("Deployment %s expects %d replicas", deployName, expectedReplicas)
return expectedReplicas, nil
}
|
monitoring
| ||||
function
|
openshift/openshift-tests-private
|
200d0ce1-0a67-4348-9d7c-67417e520fb3
|
waitForPodsToMatchReplicas
|
['"context"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring_utils.go
|
func waitForPodsToMatchReplicas(oc *exutil.CLI, namespace string, deployName string, label string) {
err := wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 10*time.Minute, true, func(ctx context.Context) (bool, error) {
expectedReplicas, err := getDeploymentReplicas(oc, namespace, deployName)
if err != nil {
e2e.Logf("Error getting expected replicas: %v", err)
return false, nil
}
runningPods, err := getAllRunningPodsWithLabel(oc, namespace, label)
if err != nil {
e2e.Logf("Error getting running pods: %v", err)
return false, nil
}
if len(runningPods) != expectedReplicas {
e2e.Logf("Mismatch: expected %d running pods, but found %d", expectedReplicas, len(runningPods))
return false, nil
}
e2e.Logf("Pods match expected replicas: %d/%d", len(runningPods), expectedReplicas)
return true, nil
})
exutil.AssertWaitPollNoErr(err, "Pods did not reach the expected number!")
}
|
monitoring
| ||||
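A hedged usage sketch for waitForPodsToMatchReplicas; the deployment name and label selector below are assumptions chosen to match the monitoring-plugin deployment referenced elsewhere in this file.
// wait until the number of Running monitoring-plugin pods equals the deployment's replica count
waitForPodsToMatchReplicas(oc, "openshift-monitoring", "monitoring-plugin", "app.kubernetes.io/name=monitoring-plugin")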
test
|
openshift/openshift-tests-private
|
1c02329e-a285-41a1-88b3-93d588d8a7ce
|
observability_operator
|
import (
"path/filepath"
o "github.com/onsi/gomega"
g "github.com/onsi/ginkgo/v2"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/openshift/openshift-tests-private/test/extended/util/architecture"
)
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator.go
|
package monitoring
import (
"path/filepath"
o "github.com/onsi/gomega"
g "github.com/onsi/ginkgo/v2"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/openshift/openshift-tests-private/test/extended/util/architecture"
)
var _ = g.Describe("[sig-monitoring] Cluster_Observability Observability Operator ConnectedOnly", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLIForKubeOpenShift("obo-" + getRandomString())
oboBaseDir = exutil.FixturePath("testdata", "monitoring", "observabilityoperator")
clID string
region string
)
g.BeforeEach(func() {
baseCapSet, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("clusterversion", "-o=jsonpath={.items[*].spec.capabilities.baselineCapabilitySet}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if baseCapSet == "None" {
g.Skip("Skip the COO tests for basecapset is none")
}
architecture.SkipNonAmd64SingleArch(oc)
clID, region = getClusterDetails(oc)
exutil.By("Install Observability Operator and check if it is successfully installed") //57234-Observability Operator installation on OCP hypershift management
if !exutil.IsROSACluster(oc) && !ifMonitoringStackCRDExists(oc) {
createObservabilityOperator(oc, oboBaseDir)
}
})
g.It("Author:Vibhu-HyperShiftMGMT-ROSA-LEVEL0-Critical-57236-Critical-57239-create monitoringstack and check config & metrics on hypershift", func() {
msD := monitoringStackDescription{
name: "hypershift-monitoring-stack",
clusterID: clID,
region: region,
namespace: "openshift-observability-operator",
secretName: "rhobs-hypershift-credential",
tokenURL: "https://sso.redhat.com/auth/realms/redhat-external/protocol/openid-connect/token",
url: "https://rhobs.rhobsp02ue1.api.openshift.com/api/metrics/v1/hypershift-platform/api/v1/receive",
template: filepath.Join(oboBaseDir, "monitoringstack.yaml"),
}
secD := monitoringStackSecretDescription{
name: "rhobs-hypershift-credential",
namespace: "openshift-observability-operator",
template: filepath.Join(oboBaseDir, "monitoringstack-secret.yaml"),
}
defer func() {
if !exutil.IsROSACluster(oc) {
deleteMonitoringStack(oc, msD, secD, "rosa_mc")
}
}()
exutil.By("Check observability operator pods liveliness")
checkOperatorPods(oc)
if !exutil.IsROSACluster(oc) {
exutil.By("Create monitoringstack CR")
createMonitoringStack(oc, msD, secD)
}
exutil.By("Check remote write config")
checkRemoteWriteConfig(oc, msD)
exutil.By("Check monitoringStack has correct clusterID region and status")
checkMonitoringStackDetails(oc, msD, "rosa_mc")
})
g.It("Author:Vibhu-LEVEL0-Critical-57440-observability operator uninstall [Serial]", func() {
defer deleteOperator(oc)
exutil.By("Delete ObservabilityOperator")
})
g.It("Author:Vibhu-HyperShiftMGMT-ROSA-High-55352-observability operator self monitoring", func() {
exutil.By("Check observability operator monitoring")
checkOperatorMonitoring(oc, oboBaseDir)
})
g.It("Author:Vibhu-HyperShiftMGMT-ROSA-LEVEL0-Critical-55349-verify observability operator", func() {
exutil.By("Check the label in namespace")
checkLabel(oc)
exutil.By("Check observability operator pods")
checkOperatorPods(oc)
exutil.By("Check liveliness/readiness probes implemented in observability operator pod")
checkPodHealth(oc)
})
g.It("Author:Vibhu-HyperShiftMGMT-ROSA-High-59383-verify OBO discovered and collected metrics of HCP", func() {
if exutil.IsROSACluster(oc) {
exutil.By("Check scrape targets")
checkHCPTargets(oc)
exutil.By("Check metric along with value")
checkMetricValue(oc, "rosa_mc")
}
})
g.It("Author:Vibhu-Critical-59384-High-59674-create monitoringstack to discover any target and verify observability operator discovered target and collected metrics of example APP", func() {
defer deleteMonitoringStack(oc, monitoringStackDescription{}, monitoringStackSecretDescription{}, "monitor_example_app")
exutil.By("Create monitoring stack")
createCustomMonitoringStack(oc, oboBaseDir)
exutil.By("Create example app")
oc.SetupProject()
ns := oc.Namespace()
createExampleApp(oc, oboBaseDir, ns)
exutil.By("Check scrape target")
checkExampleAppTarget(oc)
exutil.By("Check metric along with value")
checkMetricValue(oc, "monitor_example_app")
})
// author: [email protected]
g.It("Author:tagao-Critical-78217-COO should pass DAST test [Serial]", func() {
exutil.By("trigger a job to install RapiDAST then scan APIs")
configFile := filepath.Join(oboBaseDir, "rapidastconfig_coo.yaml")
policyFile := filepath.Join(oboBaseDir, "customscan.policy")
_, err := rapidastScan(oc, oc.Namespace(), configFile, policyFile, "coo")
o.Expect(err).NotTo(o.HaveOccurred())
})
})
|
package monitoring
| ||||
test case
|
openshift/openshift-tests-private
|
d0f12d6d-6329-461a-b687-49f1468fb989
|
Author:Vibhu-HyperShiftMGMT-ROSA-LEVEL0-Critical-57236-Critical-57239-create monitoringstack and check config & metrics on hypershift
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator.go
|
g.It("Author:Vibhu-HyperShiftMGMT-ROSA-LEVEL0-Critical-57236-Critical-57239-create monitoringstack and check config & metrics on hypershift", func() {
msD := monitoringStackDescription{
name: "hypershift-monitoring-stack",
clusterID: clID,
region: region,
namespace: "openshift-observability-operator",
secretName: "rhobs-hypershift-credential",
tokenURL: "https://sso.redhat.com/auth/realms/redhat-external/protocol/openid-connect/token",
url: "https://rhobs.rhobsp02ue1.api.openshift.com/api/metrics/v1/hypershift-platform/api/v1/receive",
template: filepath.Join(oboBaseDir, "monitoringstack.yaml"),
}
secD := monitoringStackSecretDescription{
name: "rhobs-hypershift-credential",
namespace: "openshift-observability-operator",
template: filepath.Join(oboBaseDir, "monitoringstack-secret.yaml"),
}
defer func() {
if !exutil.IsROSACluster(oc) {
deleteMonitoringStack(oc, msD, secD, "rosa_mc")
}
}()
exutil.By("Check observability operator pods liveliness")
checkOperatorPods(oc)
if !exutil.IsROSACluster(oc) {
exutil.By("Create monitoringstack CR")
createMonitoringStack(oc, msD, secD)
}
exutil.By("Check remote write config")
checkRemoteWriteConfig(oc, msD)
exutil.By("Check monitoringStack has correct clusterID region and status")
checkMonitoringStackDetails(oc, msD, "rosa_mc")
})
| |||||
test case
|
openshift/openshift-tests-private
|
23d2d791-b78c-4c38-b996-d45a2e00a939
|
Author:Vibhu-LEVEL0-Critical-57440-observability operator uninstall [Serial]
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/observability_operator.go
|
g.It("Author:Vibhu-LEVEL0-Critical-57440-observability operator uninstall [Serial]", func() {
defer deleteOperator(oc)
exutil.By("Delete ObservabilityOperator")
})
|