element_type | project_name | uuid | name | imports | structs | interfaces | file_location | code | global_vars | package | tags |
---|---|---|---|---|---|---|---|---|---|---|---|
function
|
openshift/openshift-tests-private
|
d72d703a-e496-4ab2-8456-1385aea11192
|
isHypershiftEnabled
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func isHypershiftEnabled(oc *exutil.CLI) bool {
guestClusterName, guestClusterKubeconfigFile, _ := exutil.ValidHypershiftAndGetGuestKubeConfWithNoSkip(oc)
return (guestClusterName != "" && guestClusterKubeconfigFile != "")
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
3c31e4f0-7715-45a0-8651-012d7af05fdf
|
getFirstHostedCluster
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func getFirstHostedCluster(oc *exutil.CLI) string {
hostedClusterName, _, _ := exutil.ValidHypershiftAndGetGuestKubeConfWithNoSkip(oc)
logger.Infof("first hostedcluster name is %s", hostedClusterName)
return hostedClusterName
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
c276e7b8-61f8-4b88-8fa5-43d8f954b0b0
|
getLatestImageURL
|
['"fmt"', '"os"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func getLatestImageURL(oc *exutil.CLI, release string) (string, string) {
if release == "" {
release = "4.12" // TODO: need to update default major version to 4.13 when 4.12 is GA
}
imageURLFormat := "%s:%s"
registryBaseURL := "registry.ci.openshift.org/ocp/release"
registryQueryURL := fmt.Sprintf(imageURLFormat, registryBaseURL, release)
registryConfig, extractErr := getPullSecret(oc)
defer os.Remove(registryConfig)
o.Expect(extractErr).NotTo(o.HaveOccurred(), "extract registry config from pull secret error")
imageInfo, getImageInfoErr := oc.AsAdmin().WithoutNamespace().Run("image").Args("info", registryQueryURL, "-a", registryConfig, "-ojson").Output()
o.Expect(getImageInfoErr).NotTo(o.HaveOccurred(), "get image info error")
o.Expect(imageInfo).NotTo(o.BeEmpty())
imageJSON := JSON(imageInfo)
buildVersion := imageJSON.Get("config").Get("config").Get("Labels").Get(`io.openshift.release`).ToString()
o.Expect(buildVersion).NotTo(o.BeEmpty(), "nightly build version is empty")
imageDigest := imageJSON.Get("digest").ToString()
o.Expect(imageDigest).NotTo(o.BeEmpty(), "image digest is empty")
imageURL := fmt.Sprintf("%s@%s", registryBaseURL, imageDigest)
logger.Infof("Get latest nigthtly build of %s: %s", release, imageURL)
return imageURL, buildVersion
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
f8ea3a3e-0269-4eb0-b3ad-245d48e992e2
|
skipTestIfSupportedPlatformNotMatched
|
['"fmt"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func skipTestIfSupportedPlatformNotMatched(oc *exutil.CLI, supported ...string) {
var match bool
p := exutil.CheckPlatform(oc)
for _, sp := range supported {
if strings.EqualFold(sp, p) {
match = true
break
}
}
if !match {
g.Skip(fmt.Sprintf("skip test because current platform %s is not in supported list %v", p, supported))
}
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
11597db2-40ed-4645-87e0-bdd75fea3a08
|
IsAROCluster
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func IsAROCluster(oc *exutil.CLI) bool {
return NewResource(oc.AsAdmin(), "clusters.aro.openshift.io", "cluster").Exists()
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
3b16cf94-5087-48ec-944f-e30968b9c022
|
skipTestIfRTKernel
|
['"time"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func skipTestIfRTKernel(oc *exutil.CLI) {
wMcp := NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
mMcp := NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)
isWorkerRT, err := wMcp.IsRealTimeKernel()
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(), "Error trying to know if realtime kernel is active worker pool")
isMasterRT, err := mMcp.IsRealTimeKernel()
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(), "Error trying to know if realtime kernel is active master pool")
if isWorkerRT || isMasterRT {
g.Skip("Pools are using real time kernel configuration. This test cannot be executed if the cluster is using RT kernel.")
}
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
0d92e79e-0681-4711-bcc2-52e299ee096d
|
skipTestIfExtensionsAreUsed
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func skipTestIfExtensionsAreUsed(oc *exutil.CLI) {
wMcp := NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
mMcp := NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)
wCurrentMC, err := wMcp.GetConfiguredMachineConfig()
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(), "Error trying to get the current MC configured in worker pool")
mCurrentMC, err := mMcp.GetConfiguredMachineConfig()
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(), "Error trying to get the current MC configured in master pool")
wExtensions, err := wCurrentMC.GetExtensions()
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(), "Error trying to get the extensions configured in MC: %s", wCurrentMC.GetName())
mExtensions, err := mCurrentMC.GetExtensions()
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(), "Error trying to get the extensions configured in MC: %s", mCurrentMC.GetName())
if wExtensions != "[]" || mExtensions != "[]" {
g.Skip("Current cluster is using extensions. This test cannot be execute in a cluster using extensions")
}
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
c7b623e6-cb2e-40c6-8d49-24c6e014f718
|
WorkersCanBeScaled
|
['"strconv"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func WorkersCanBeScaled(oc *exutil.CLI) (bool, error) {
logger.Infof("Checking if in this cluster workers can be scaled using machinesets")
if exutil.CheckPlatform(oc) == BaremetalPlatform {
logger.Infof("Baremetal platform. Can't scale up nodes in Baremetal test environmets. Nodes cannot be scaled")
return false, nil
}
if !IsCapabilityEnabled(oc.AsAdmin(), "MachineAPI") {
logger.Infof("MachineAPI capability is disabled. Nodes cannot be scaled")
return false, nil
}
msl, err := NewMachineSetList(oc.AsAdmin(), MachineAPINamespace).GetAll()
if err != nil {
logger.Errorf("Error getting a list of MachineSet resources")
return false, err
}
// If there is no machineset then clearly we can't use them to scale the workers
if len(msl) == 0 {
logger.Infof("No machineset configured. Nodes cannot be scaled")
return false, nil
}
totalworkers := 0
for _, ms := range msl {
replicas, err := ms.Get(`{.spec.replicas}`)
if err != nil {
logger.Errorf("Error getting the number of replicase in %s", ms)
return false, err
}
if replicas != "" {
intReplicas, err := strconv.Atoi(replicas)
if err == nil {
totalworkers += intReplicas
}
}
}
// In some UPI/SNO/Compact clusters machineset resources exist, but they are all configured with 0 replicas
// If all machinesets have 0 replicas, then it means that we need to skip the test case
if totalworkers == 0 {
logger.Infof("All machinesets have 0 worker nodes. Nodes cannot be scaled")
return false, nil
}
return true, nil
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
13f50cb7-d3fb-4e6b-b63d-fae476730666
|
skipTestIfWorkersCannotBeScaled
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func skipTestIfWorkersCannotBeScaled(oc *exutil.CLI) {
canBeScaled, err := WorkersCanBeScaled(oc)
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(), "Error decidign if worker nodes can be scaled using machinesets")
if !canBeScaled {
g.Skip("Worker nodes cannot be scaled using machinesets. This test cannot be execute if workers cannot be scaled via machineset")
}
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
286e8194-7d8f-4282-9051-c2e8d6151df4
|
isBaselineCapabilitySetNone
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func isBaselineCapabilitySetNone(oc *exutil.CLI) bool {
return len(getEnabledCapabilities(oc)) == 0
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
299f89e2-037e-48d7-83f3-0dc13e24b5bb
|
getEnabledFeatureGates
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func getEnabledFeatureGates(oc *exutil.CLI) ([]string, error) {
enabledFeatureGates, err := NewResource(oc.AsAdmin(), "featuregate", "cluster").Get(`{.status.featureGates[0].enabled[*].name}`)
if err != nil {
return nil, err
}
return strings.Split(enabledFeatureGates, " "), nil
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
caa926ce-5818-4f09-8d84-71bf4ffd86fe
|
IsFeaturegateEnabled
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func IsFeaturegateEnabled(oc *exutil.CLI, featuregate string) (bool, error) {
enabledFeatureGates, err := getEnabledFeatureGates(oc)
if err != nil {
return false, err
}
for _, f := range enabledFeatureGates {
if f == featuregate {
return true, nil
}
}
return false, nil
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
ded7b367-affd-4011-b0bb-bf24694f9cc5
|
SkipIfNoFeatureGate
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func SkipIfNoFeatureGate(oc *exutil.CLI, featuregate string) {
enabled, err := IsFeaturegateEnabled(oc, featuregate)
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting enabled featuregates")
if !enabled {
g.Skip(fmt.Sprintf("Featuregate %s is not enabled in this cluster", featuregate))
}
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
9a8fb9c0-25c8-4e12-9d60-7561f1caee97
|
getEnabledCapabilities
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func getEnabledCapabilities(oc *exutil.CLI) []interface{} {
jsonStr := NewResource(oc.AsAdmin(), "clusterversion", "version").GetOrFail(`{.status.capabilities.enabledCapabilities}`)
logger.Infof("enabled capabilities: %s", jsonStr)
enabledCapabilities := make([]interface{}, 0)
jsonData := JSON(jsonStr)
if jsonData.Exists() {
enabledCapabilities = jsonData.ToList()
}
return enabledCapabilities
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
047343fe-95ff-47b4-9dba-4dca3e22c1e0
|
IsCapabilityEnabled
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func IsCapabilityEnabled(oc *exutil.CLI, capability string) bool {
enabledCapabilities := getEnabledCapabilities(oc)
enabled := false
for _, ec := range enabledCapabilities {
if ec == capability {
enabled = true
break
}
}
logger.Infof("Capability [%s] is enabled: %v", capability, enabled)
return enabled
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
1e1433bf-d812-42d3-8ad1-9946da7af7c0
|
GetCurrentTestPolarionIDNumber
|
['"regexp"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func GetCurrentTestPolarionIDNumber() string {
name := g.CurrentSpecReport().FullText()
r := regexp.MustCompile(`-(?P<id>\d+)-`)
matches := r.FindStringSubmatch(name)
number := r.SubexpIndex("id")
if len(matches) < number+1 {
logger.Errorf("Could not get the test case ID")
return ""
}
return matches[number]
}
|
mco
| ||||
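A standalone sketch of the ID-extraction technique used by GetCurrentTestPolarionIDNumber above, using only the standard library; the sample spec title is made up for illustration:
package main
import (
	"fmt"
	"regexp"
)
func main() {
	// Hypothetical Ginkgo spec title; in the helper above it comes from g.CurrentSpecReport().FullText()
	name := "Author:hongyli-High-49073-Retention size settings for platform"
	r := regexp.MustCompile(`-(?P<id>\d+)-`)
	matches := r.FindStringSubmatch(name)
	idx := r.SubexpIndex("id")
	if len(matches) < idx+1 {
		fmt.Println("Could not get the test case ID")
		return
	}
	fmt.Println(matches[idx]) // prints "49073"
}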
function
|
openshift/openshift-tests-private
|
ff2d9b23-5570-4ee1-a55f-f50f2694226f
|
GetBase64EncodedFileSourceContent
|
['b64 "encoding/base64"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func GetBase64EncodedFileSourceContent(fileContent string) string {
encodedContent := b64.StdEncoding.EncodeToString([]byte(fileContent))
return "data:text/plain;charset=utf-8;base64," + encodedContent
}
|
mco
| ||||
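To make the data-URL encoding above concrete, a minimal standard-library sketch; the file content is an arbitrary example:
package main
import (
	b64 "encoding/base64"
	"fmt"
)
func main() {
	fileContent := "hello MCO"
	encoded := b64.StdEncoding.EncodeToString([]byte(fileContent))
	// Ignition-style data URL, mirroring GetBase64EncodedFileSourceContent above
	fmt.Println("data:text/plain;charset=utf-8;base64," + encoded)
	// Output: data:text/plain;charset=utf-8;base64,aGVsbG8gTUNP
}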
function
|
openshift/openshift-tests-private
|
1681071c-aa1f-4444-8c35-b0e6d7464d9c
|
ConvertOctalPermissionsToDecimalOrFail
|
['"strconv"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func ConvertOctalPermissionsToDecimalOrFail(octalPerm string) int {
o.ExpectWithOffset(1, octalPerm).To(o.And(
o.Not(o.BeEmpty()),
o.HavePrefix("0")),
"Error the octal permissions %s should not be empty and should start with a '0' character", octalPerm)
// parse the octal string and convert it to an integer
iMode, err := strconv.ParseInt(octalPerm, 8, 64)
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(), "Error parsing string %s as octal", octalPerm)
return int(iMode)
}
|
mco
| ||||
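The conversion above reduces to strconv.ParseInt with base 8. A self-contained sketch with an illustrative permission string:
package main
import (
	"fmt"
	"strconv"
)
func main() {
	octalPerm := "0644"
	// base-8 parse, as in ConvertOctalPermissionsToDecimalOrFail above
	iMode, err := strconv.ParseInt(octalPerm, 8, 64)
	if err != nil {
		fmt.Println("not a valid octal string:", err)
		return
	}
	fmt.Println(int(iMode)) // prints 420, the decimal value of mode 0644
}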
function
|
openshift/openshift-tests-private
|
d7313ba1-48a7-4b72-8d81-ffad53996e63
|
PtrInt
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func PtrInt(a int) *int {
return &a
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
43a6ab4d-b3dc-4731-a2d1-4c18ff47fdd6
|
PtrStr
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func PtrStr(a string) *string {
return &a
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
7889dead-2d0f-41c3-bc67-1aea8b57bde8
|
PtrTo
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func PtrTo[T any](v T) *T {
return &v
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
c410f580-885b-4e33-b450-fb5a1f1375c3
|
RemoveAllMCDPods
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func RemoveAllMCDPods(oc *exutil.CLI) error {
return removeMCOPods(oc, "-l", "k8s-app=machine-config-daemon")
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
1fc3723d-53af-4c37-a4f8-e33e0d117751
|
removeMCOPods
|
['"time"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func removeMCOPods(oc *exutil.CLI, argsSelector ...string) error {
args := append([]string{"pods", "-n", MachineConfigNamespace}, argsSelector...)
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args(args...).Execute()
if err != nil {
logger.Errorf("Cannot delete the pods in %s namespace", MachineConfigNamespace)
return err
}
return waitForAllMCOPodsReady(oc, 15*time.Minute)
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
b33b5afd-7187-4e88-a4bc-a812998f42b3
|
waitForAllMCOPodsReady
|
['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func waitForAllMCOPodsReady(oc *exutil.CLI, timeout time.Duration) error {
logger.Infof("Waiting for MCO pods to be runnging and ready in namespace %s", MachineConfigNamespace)
mcoPodsList := NewNamespacedResourceList(oc.AsAdmin(), "pod", MachineConfigNamespace)
mcoPodsList.PrintDebugCommand()
immediate := false
waitErr := wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, timeout, immediate,
func(_ context.Context) (bool, error) {
status, err := mcoPodsList.Get(`{.items[*].status.conditions[?(@.type=="Ready")].status}`)
if err != nil {
logger.Errorf("Problems getting pods info. Trying again")
return false, nil
}
if strings.Contains(status, "False") {
return false, nil
}
return true, nil
})
if waitErr != nil {
_ = oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", MachineConfigNamespace).Execute()
return fmt.Errorf("MCO pods were deleted in namespace %s, but they did not become ready", MachineConfigNamespace)
}
return nil
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
a3cb45af-414e-4fe8-815e-bec2380825fe
|
OCCreate
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func OCCreate(oc *exutil.CLI, fileName string) error {
return oc.WithoutNamespace().Run("create").Args("-f", fileName).Execute()
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
bf57e789-937a-4994-93e2-099f87d065b5
|
GetMCSPodNames
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func GetMCSPodNames(oc *exutil.CLI) ([]string, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", MachineConfigNamespace,
"-l", "k8s-app=machine-config-server", "-o", "jsonpath={.items[*].metadata.name }").Output()
if err != nil {
return nil, err
}
if strings.Trim(output, " \n") == "" {
return []string{}, nil
}
return strings.Split(output, " "), nil
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
e79c9cfd-87c7-4732-8e5a-cfc6b7e2f93b
|
RotateMCSCertificates
|
['"fmt"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func RotateMCSCertificates(oc *exutil.CLI) error {
wMcp := NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
master := wMcp.GetNodesOrFail()[0]
remoteAdminKubeConfig := fmt.Sprintf("/root/remoteKubeConfig-%s", exutil.GetRandomString())
adminKubeConfig := exutil.KubeConfigPath()
defer master.RemoveFile(remoteAdminKubeConfig)
err := master.CopyFromLocal(adminKubeConfig, remoteAdminKubeConfig)
if err != nil {
return err
}
command := fmt.Sprintf("oc --kubeconfig=%s --insecure-skip-tls-verify adm ocp-certificates regenerate-machine-config-server-serving-cert",
remoteAdminKubeConfig)
logger.Infof("RUN: %s", command)
stdout, err := master.DebugNodeWithChroot(strings.Split(command, " ")...)
logger.Infof(stdout)
return err
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
3eaa956a-f83f-4864-9794-cd96ac4adbed
|
GetCertificatesInfoFromPemBundle
|
['"crypto/x509"', '"encoding/pem"', '"fmt"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func GetCertificatesInfoFromPemBundle(bundleName string, pemBundle []byte) ([]CertificateInfo, error) {
var certificatesInfo []CertificateInfo
if pemBundle == nil {
return nil, fmt.Errorf("Provided pem bundle is nil")
}
if len(pemBundle) == 0 {
logger.Infof("Empty pem bundle")
return certificatesInfo, nil
}
for {
block, rest := pem.Decode(pemBundle)
if block == nil {
return nil, fmt.Errorf("failed to parse certificate PEM:\n%s", string(pemBundle))
}
logger.Infof("FOUND: %s", block.Type)
if block.Type != "CERTIFICATE" {
return nil, fmt.Errorf("Only CERTIFICATES are expected in the bundle, but a type %s was found in it", block.Type)
}
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return nil, err
}
certificatesInfo = append(certificatesInfo,
CertificateInfo{
BundleFile: bundleName,
NotAfter: cert.NotAfter.Format(time.RFC3339),
NotBefore: cert.NotBefore.Format(time.RFC3339),
Signer: cert.Issuer.String(),
Subject: cert.Subject.String(),
},
)
pemBundle = rest
if len(rest) == 0 {
break
}
}
return certificatesInfo, nil
}
|
mco
| ||||
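The pem.Decode loop above can be exercised on its own against any CA bundle. A minimal standard-library sketch; the bundle path is hypothetical:
package main
import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)
func main() {
	// Hypothetical bundle file; any PEM file containing only CERTIFICATE blocks works
	pemBundle, err := os.ReadFile("/tmp/ca-bundle.pem")
	if err != nil {
		fmt.Println("cannot read bundle:", err)
		return
	}
	for len(pemBundle) > 0 {
		block, rest := pem.Decode(pemBundle)
		if block == nil || block.Type != "CERTIFICATE" {
			fmt.Println("unexpected content in bundle")
			return
		}
		cert, err := x509.ParseCertificate(block.Bytes)
		if err != nil {
			fmt.Println("cannot parse certificate:", err)
			return
		}
		fmt.Printf("subject=%s notAfter=%s\n", cert.Subject.String(), cert.NotAfter.Format(time.RFC3339))
		pemBundle = rest
	}
}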
function
|
openshift/openshift-tests-private
|
b94681fd-f201-4f6b-bfc2-5134bacb2568
|
GetImageRegistryCertificates
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func GetImageRegistryCertificates(oc *exutil.CLI) (map[string]string, error) {
return GetDataFromConfigMap(oc.AsAdmin(), "openshift-config-managed", "image-registry-ca")
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
8e836a16-651f-4cc8-8345-5385046df9bb
|
GetManagedMergedTrustedImageRegistryCertificates
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func GetManagedMergedTrustedImageRegistryCertificates(oc *exutil.CLI) (map[string]string, error) {
return GetDataFromConfigMap(oc.AsAdmin(), "openshift-config-managed", "merged-trusted-image-registry-ca")
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
c40aee7c-b2f8-49d0-bd46-89ccf676845b
|
GetDataFromConfigMap
|
['"encoding/json"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func GetDataFromConfigMap(oc *exutil.CLI, namespace, name string) (map[string]string, error) {
data := map[string]string{}
cm := NewNamespacedResource(oc.AsAdmin(), "ConfigMap", namespace, name)
dataJSON, err := cm.Get(`{.data}`)
if err != nil {
return nil, err
}
if dataJSON == "" {
return data, nil
}
if err := json.Unmarshal([]byte(dataJSON), &data); err != nil {
return nil, err
}
return data, nil
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
018d97eb-314b-4690-b42a-288b1a46632b
|
createCA
|
['"crypto/x509"', '"encoding/pem"', '"os/exec"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func createCA(tmpDir, caFileName string) (keyPath, caPath string, err error) {
var (
keyFileName = "privateKey.pem"
)
caPath = filepath.Join(tmpDir, caFileName)
keyPath = filepath.Join(tmpDir, keyFileName)
logger.Infof("Creating CA in directory %s", tmpDir)
logger.Infof("Create key")
keyArgs := []string{"genrsa", "-out", keyFileName, "4096"}
cmd := exec.Command("openssl", keyArgs...)
cmd.Dir = tmpDir
output, err := cmd.CombinedOutput()
if err != nil {
logger.Errorf(string(output))
return "", "", err
}
logger.Infof("Create CA")
caArgs := []string{"req", "-new", "-x509", "-nodes", "-days", "3600", "-key", "privateKey.pem", "-out", caFileName, "-subj", "/OU=MCO QE/CN=example.com"}
cmd = exec.Command("openssl", caArgs...)
cmd.Dir = tmpDir
output, err = cmd.CombinedOutput()
if err != nil {
logger.Errorf(string(output))
return "", "", err
}
return keyPath, caPath, nil
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
946f8346-cf4d-40b3-b638-fbf517462255
|
splitCommandString
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func splitCommandString(strCommand string) []string {
command := []string{}
insideDoubleQuote := false
insideSingleQuote := false
isSingleQuote := func(b byte) bool {
return b == '\'' && !insideDoubleQuote
}
isDoubleQuote := func(b byte) bool {
return b == '"' && !insideSingleQuote
}
arg := []byte{}
for _, char := range []byte(strings.TrimSpace(strCommand)) {
if isDoubleQuote(char) {
// skip the first character of the quote
if !insideDoubleQuote {
insideDoubleQuote = true
continue
}
// we are inside a quote
// if the new double quote is escaped we unescape it and continue inside the quote
if arg[len(arg)-1] == '\\' {
arg[len(arg)-1] = '"'
continue
}
// If there is no escaped char then we get out of the quote state, ignoring the last character of the quote
insideDoubleQuote = false
continue
}
if isSingleQuote(char) {
// skip the first character of the quote
if !insideSingleQuote {
insideSingleQuote = true
continue
}
// we are inside a quote
// if the new single quote is escaped we unescape it and continue inside the quote
if arg[len(arg)-1] == '\\' {
arg[len(arg)-1] = '\''
continue
}
// If there is no escaped char then we get out of the quote state, ignoring the last character of the quote
insideSingleQuote = false
continue
}
if char == ' ' && !insideDoubleQuote && !insideSingleQuote {
command = append(command, string(arg))
arg = []byte{}
continue
}
arg = append(arg, char)
}
if len(arg) > 0 {
command = append(command, string(arg))
}
return command
}
|
mco
| ||||
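To illustrate the quote handling above, a hedged usage sketch; it assumes splitCommandString from util.go is compiled in the same package and the command string is a made-up example:
// Usage sketch (not a definitive test), assuming splitCommandString is in scope in package mco.
func exampleSplitCommandString() []string {
	cmd := `bash -c "journalctl -o with-unit > /tmp/journal.log"`
	// The double-quoted section is kept as a single argument:
	// []string{"bash", "-c", "journalctl -o with-unit > /tmp/journal.log"}
	return splitCommandString(cmd)
}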
function
|
openshift/openshift-tests-private
|
1ba65e91-a80f-4fb3-80ce-d892e8f8eb15
|
GetAPIServerInternalURI
|
['"regexp"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func GetAPIServerInternalURI(oc *exutil.CLI) (string, error) {
infra := NewResource(oc, "infrastructure", "cluster")
apiServerInternalURI, err := infra.Get(`{.status.apiServerInternalURI}`)
if err != nil {
return "", err
}
return regexp.MustCompile(`^https*:\/\/(.*):\d+$`).ReplaceAllString(strings.TrimSpace(apiServerInternalURI), `$1`), nil
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
153b5e91-6efb-4309-80cb-70644fb80bcf
|
IsCompactOrSNOCluster
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func IsCompactOrSNOCluster(oc *exutil.CLI) bool {
var (
wMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
mcpList = NewMachineConfigPoolList(oc.AsAdmin())
)
return wMcp.IsEmpty() && len(mcpList.GetAllOrFail()) == 2
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
b661fca6-db47-4aa6-bd52-f01c0759278c
|
IsInstalledWithAssistedInstallerOrFail
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func IsInstalledWithAssistedInstallerOrFail(oc *exutil.CLI) bool {
logger.Infof("Checking if the cluster was installed using assisted-installer")
assistedInstallerNS := NewResource(oc, "ns", "assisted-installer")
return assistedInstallerNS.Exists()
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
21ce5bae-8fe4-47d9-b2f4-165a2fb5c6b9
|
IsOnPremPlatform
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func IsOnPremPlatform(platform string) bool {
switch platform {
case BaremetalPlatform, OvirtPlatform, OpenstackPlatform, VspherePlatform, NutanixPlatform:
return true
default:
return false
}
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
c655316c-1306-4d9f-9d9b-98b630f6a71c
|
SkipIfNotOnPremPlatform
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func SkipIfNotOnPremPlatform(oc *exutil.CLI) {
platform := exutil.CheckPlatform(oc)
if !IsOnPremPlatform(platform) {
g.Skip(fmt.Sprintf("Current platform: %s. This test can only be execute in OnPrem platforms.", platform))
}
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
4592d4af-8228-4c43-93c0-1a0956613a34
|
GetClonedResourceJSONString
|
['"github.com/tidwall/sjson"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func GetClonedResourceJSONString(res ResourceInterface, newName, newNamespace string, extraModifications func(string) (string, error)) (string, error) {
jsonRes, err := res.GetCleanJSON()
if err != nil {
return "", err
}
jsonRes, err = sjson.Delete(jsonRes, "status")
if err != nil {
return "", err
}
jsonRes, err = sjson.Delete(jsonRes, "metadata.creationTimestamp")
if err != nil {
return "", err
}
jsonRes, err = sjson.Delete(jsonRes, "metadata.resourceVersion")
if err != nil {
return "", err
}
jsonRes, err = sjson.Delete(jsonRes, "metadata.uid")
if err != nil {
return "", err
}
jsonRes, err = sjson.Delete(jsonRes, "metadata.generation")
if err != nil {
return "", err
}
jsonRes, err = sjson.Set(jsonRes, "metadata.name", newName)
if err != nil {
return "", err
}
if newNamespace != "" {
jsonRes, err = sjson.Set(jsonRes, "metadata.namespace", newNamespace)
if err != nil {
return "", err
}
}
if extraModifications != nil {
logger.Infof("Executing extra modifications")
return extraModifications(jsonRes)
}
return jsonRes, nil
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
531920a2-0e93-483a-a7e0-f53e70f26fe1
|
CloneResource
|
['"encoding/json"', '"os"', '"github.com/google/uuid"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func CloneResource(res ResourceInterface, newName, newNamespace string, extraModifications func(string) (string, error)) (*Resource, error) {
logger.Infof("Cloning resource %s with name %s and namespace %s", res, newName, newNamespace)
jsonRes, err := GetClonedResourceJSONString(res, newName, newNamespace, extraModifications)
if err != nil {
return nil, err
}
if newNamespace == "" {
newNamespace = res.GetNamespace()
}
filename := "cloned-" + res.GetKind() + "-" + newName + "-" + uuid.NewString()
if newNamespace != "" {
filename += "-namespace"
}
filename += ".json"
tmpFile := generateTmpFile(res.GetOC(), filename)
wErr := os.WriteFile(tmpFile, []byte(jsonRes), 0o644)
if wErr != nil {
return nil, wErr
}
logger.Infof("New resource created using definition file %s", tmpFile)
_, cErr := res.GetOC().AsAdmin().WithoutNamespace().Run("create").Args("-f", tmpFile).Output()
if cErr != nil {
return nil, cErr
}
return NewNamespacedResource(res.GetOC(), res.GetKind(), newNamespace, newName), nil
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
7abba83a-7210-4128-aa9f-2beec5ed83d1
|
skipIfNoTechPreview
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func skipIfNoTechPreview(oc *exutil.CLI) {
if !exutil.IsTechPreviewNoUpgrade(oc) {
g.Skip("featureSet: TechPreviewNoUpgrade is required for this test")
}
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
8afd2b31-3177-450b-b793-e186b63b54a2
|
IsTrue
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func IsTrue(s string) bool {
return strings.EqualFold(s, TrueString)
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
97c4cee8-ae23-483a-9b72-f0280929c94e
|
ToJSON
|
['"bytes"', '"encoding/json"', '"sigs.k8s.io/yaml"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func ToJSON(content string) (string, error) {
var js json.RawMessage
if json.Unmarshal([]byte(content), &js) == nil {
// the string is already JSON, no need to manipulate it
return content, nil
}
bytes, err := yaml.YAMLToJSON([]byte(content))
return string(bytes), err
}
|
mco
| ||||
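A standalone sketch of the JSON-first/YAML-fallback approach used by ToJSON above, relying on sigs.k8s.io/yaml as the helper does; the YAML sample is made up:
package main
import (
	"encoding/json"
	"fmt"
	"sigs.k8s.io/yaml"
)
func main() {
	content := "metadata:\n  name: cluster-monitoring-config\n  namespace: openshift-monitoring\n"
	var js json.RawMessage
	if json.Unmarshal([]byte(content), &js) == nil {
		fmt.Println(content) // already JSON, nothing to do
		return
	}
	out, err := yaml.YAMLToJSON([]byte(content))
	if err != nil {
		fmt.Println("conversion failed:", err)
		return
	}
	fmt.Println(string(out)) // {"metadata":{"name":"cluster-monitoring-config","namespace":"openshift-monitoring"}}
}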
function
|
openshift/openshift-tests-private
|
bfc7a219-745b-4a88-91e1-bc2beb4d78c5
|
getCertsFromKubeconfig
|
['"context"', 'b64 "encoding/base64"', '"fmt"', '"github.com/tidwall/gjson"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func getCertsFromKubeconfig(kubeconfig string) (string, error) {
// We don't know if the kubeconfig file will be in YAML or in JSON format
// We will transform it into JSON
JSONstring, err := ToJSON(kubeconfig)
if err != nil {
return "", err
}
currentCtx := gjson.Get(JSONstring, "current-context")
logger.Debugf("Context: %s\n", currentCtx)
if !currentCtx.Exists() || currentCtx.String() == "" {
return "", fmt.Errorf("No current-contenxt in the provided kubeconfig")
}
logger.Debugf("Current context: %s", currentCtx.String())
cluster := gjson.Get(JSONstring, `contexts.#(name=="`+currentCtx.String()+`").context.cluster`)
if !cluster.Exists() || cluster.String() == "" {
return "", fmt.Errorf("No current cluster information for context %s in the provided kubeconfig", currentCtx.String())
}
logger.Debugf("Cluster: %s\n", cluster.String())
cert64 := gjson.Get(JSONstring, `clusters.#(name=="`+cluster.String()+`").cluster.certificate-authority-data`)
if !cert64.Exists() || cert64.String() == "" {
return "", fmt.Errorf("No current certificate-authority-data information for context %s and cluster %s in the provided kubeconfig", currentCtx.String(), cluster.String())
}
cert, err := b64.StdEncoding.DecodeString(cert64.String())
if err != nil {
logger.Errorf("The certiifcate provided in the kubeconfig is not base64 encoded")
return "", err
}
logger.Infof("Certificate successfully extracted from kubeconfig data")
logger.Debugf("Cert: %s\n", string(cert))
return string(cert), nil
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
cfe01fc7-5efe-492d-b880-e63305f98798
|
checkAllOperatorsHealthy
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func checkAllOperatorsHealthy(oc *exutil.CLI, timeout, poll string) {
o.Eventually(func(gm o.Gomega) { // Passing o.Gomega as parameter we can use assertions inside the Eventually function without breaking the retries.
ops, err := NewResourceList(oc, "co").GetAll()
gm.Expect(err).NotTo(o.HaveOccurred(), "Could not get a list with all the clusteroperator resources")
for _, op := range ops {
gm.Expect(&op).NotTo(BeDegraded(), "%s is Degraded!. \n%s", op.PrettyString())
}
}, timeout, poll).
Should(o.Succeed(),
"There are degraded ClusterOperators!")
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
657b15a0-59e7-43f9-8756-0f65b29b8a68
|
IsSNO
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func IsSNO(oc *exutil.CLI) bool {
return len(exutil.OrFail[[]Node](NewNodeList(oc.AsAdmin()).GetAll())) == 1
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
bd300f1e-bd8a-4433-9762-3420d0ea091f
|
IsSNOSafe
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func IsSNOSafe(oc *exutil.CLI) (bool, error) {
allNodes, err := NewNodeList(oc.AsAdmin()).GetAll()
if err != nil {
return false, err
}
return len(allNodes) == 1, nil
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
7fb41cb3-69f3-4f03-8ba4-fd4ae6bec74c
|
SkipIfSNO
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func SkipIfSNO(oc *exutil.CLI) {
if IsSNO(oc) {
g.Skip("There is only 1 node in the cluster. This test is not supported in SNO clusters")
}
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
5a8cacf3-1b44-444e-85ff-616399408867
|
SkipIfCompactOrSNO
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func SkipIfCompactOrSNO(oc *exutil.CLI) {
if IsCompactOrSNOCluster(oc) {
g.Skip("The test is not supported in Compact or SNO clusters")
}
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
c4ce9553-be1e-4b4c-8bb1-e30b704b1260
|
getAllKubeProxyPod
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func getAllKubeProxyPod(oc *exutil.CLI, namespace string) ([]string, error) {
var kubeRbacProxyPodList []string
getKubeProxyPod, err := exutil.GetAllPods(oc.AsAdmin(), namespace)
for i := range getKubeProxyPod {
if strings.Contains(getKubeProxyPod[i], "kube-rbac-proxy-crio-") {
kubeRbacProxyPodList = append(kubeRbacProxyPodList, getKubeProxyPod[i])
}
}
if len(kubeRbacProxyPodList) == 0 {
logger.Infof("Empty kube-rbac-proxy-crio- pod list")
return kubeRbacProxyPodList, err
}
return kubeRbacProxyPodList, err
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
2886dfc4-3b47-46e5-82ec-0c32bdce59b2
|
WaitForStableCluster
|
['"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func WaitForStableCluster(oc *exutil.CLI, minimumStable, timeout string) error {
err := oc.AsAdmin().Run("adm").Args("wait-for-stable-cluster", "--minimum-stable-period", minimumStable, "--timeout", timeout).Execute()
if err != nil {
oc.Run("get").Args("co").Execute()
}
return err
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
d37d34c8-7267-4d72-8bdc-baf351d727f3
|
extractJournalLogs
|
['"path"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func extractJournalLogs(oc *exutil.CLI, outDir string) (totalErr error) {
var (
nl = NewNodeList(oc)
)
logger.Infof("Collecting journal logs")
allNodes, err := nl.GetAll()
if err != nil {
return err
}
for _, node := range allNodes {
if node.HasTaintEffectOrFail("NoExecute") {
logger.Infof("Node %s is tainted with 'NoExecute'. Validation skipped.", node.GetName())
continue
}
if node.GetConditionStatusByType("DiskPressure") != FalseString {
logger.Infof("Node %s is under disk pressure. The node cannot be debugged. We skip the validation for this node", node.GetName())
continue
}
logger.Infof("Collecting journal logs from node: %s", node.GetName())
fileName := path.Join(outDir, node.GetName()+"-journal.log")
tmpFilePath := path.Join("/tmp/journal.log")
_, err := node.DebugNodeWithChroot("bash", "-c", "journalctl -o with-unit > "+tmpFilePath)
if err != nil {
totalErr = err
logger.Infof("Error getting journal logs from node %s: %s", node.GetName(), err)
continue
}
err = node.CopyToLocal(tmpFilePath, fileName)
if err != nil {
totalErr = err
logger.Infof("Error copying the file with the journal logs from node %s: %s", node.GetName(), err)
continue
}
}
return totalErr
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
52118cce-dadd-48c0-b031-9c711c13da73
|
Retry
|
['"time"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func Retry(attempts int, delay time.Duration, f func() error) error {
var err error
for i := 0; i < attempts; i++ {
err = f()
if err == nil {
return nil
}
logger.Errorf("Attempt %d failed: %v\n", i+1, err)
if i < attempts-1 {
time.Sleep(delay)
}
}
return err
}
|
mco
| ||||
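A hedged usage sketch for Retry above; it assumes the helper and the package's fmt and time imports are in scope, and uses a deliberately flaky closure for illustration:
// Usage sketch, assuming Retry is in scope in package mco.
func exampleRetry() error {
	count := 0
	// Try up to 3 times with a 2-second pause between attempts.
	return Retry(3, 2*time.Second, func() error {
		count++
		if count < 3 {
			return fmt.Errorf("transient failure %d", count)
		}
		return nil
	})
}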
function
|
openshift/openshift-tests-private
|
b9d55ac7-55c5-40ce-b21e-b07b26445ea5
|
GetLastNLines
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func GetLastNLines(s string, n int) string {
lines := strings.Split(strings.ReplaceAll(s, "\r\n", "\n"), "\n")
lenLines := len(lines)
if lenLines > n {
return strings.Join(lines[lenLines-n:], "\n")
}
return strings.Join(lines, "\n")
}
|
mco
| ||||
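A standalone sketch of the tail logic implemented by GetLastNLines above; the sample input is chosen for illustration:
package main
import (
	"fmt"
	"strings"
)
func main() {
	s := "line1\r\nline2\nline3\nline4"
	n := 2
	// Same approach as GetLastNLines above: normalize CRLF, then keep the last n lines.
	lines := strings.Split(strings.ReplaceAll(s, "\r\n", "\n"), "\n")
	if len(lines) > n {
		lines = lines[len(lines)-n:]
	}
	fmt.Println(strings.Join(lines, "\n")) // prints "line3" and "line4"
}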
function
|
openshift/openshift-tests-private
|
3c313b1a-c8b0-4f1c-b363-5c30a0abc1dc
|
RemoveDuplicates
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func RemoveDuplicates[T comparable](list []T) []T {
allKeys := make(map[T]bool)
filteredList := []T{}
for _, item := range list {
if !allKeys[item] {
allKeys[item] = true
filteredList = append(filteredList, item)
}
}
return filteredList
}
|
mco
| |||||
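A hedged usage sketch for RemoveDuplicates above, assuming the generic helper is in scope in the same package; the node names are made up:
// Usage sketch, assuming RemoveDuplicates is in scope in package mco.
func exampleRemoveDuplicates() []string {
	nodes := []string{"worker-0", "worker-1", "worker-0", "master-0"}
	// First-occurrence order is preserved: []string{"worker-0", "worker-1", "master-0"}
	return RemoveDuplicates(nodes)
}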
function
|
openshift/openshift-tests-private
|
736c5146-0b86-4cf9-83f2-c599c87ae48e
|
GetClusterDesiredReleaseImage
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func GetClusterDesiredReleaseImage(oc *exutil.CLI) (string, error) {
return oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion", "version", "-o", "jsonpath={.status.desired.image}").Output()
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
413cdbab-c0fb-4f0a-b6a3-5749de55f2f8
|
MergeDockerConfigs
|
['"encoding/json"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/util.go
|
func MergeDockerConfigs(dockerConfig1, dockerConfig2 string) (string, error) {
type DockerConfig struct {
Auths map[string]interface{} `json:"auths"`
}
var config1, config2 DockerConfig
err := json.Unmarshal([]byte(dockerConfig1), &config1)
if err != nil {
logger.Errorf("Error unmarshalling dockerConfig1")
return "", err
}
err = json.Unmarshal([]byte(dockerConfig2), &config2)
if err != nil {
logger.Errorf("Error unmarshalling dockerConfig2")
return "", err
}
for k, v := range config2.Auths {
config1.Auths[k] = v
}
mergedConfig, err := json.Marshal(config1)
if err != nil {
logger.Errorf("Cannot marshal the merged docker config")
return "", err
}
return string(mergedConfig), err
}
|
mco
| ||||
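A hedged usage sketch for MergeDockerConfigs above, assuming the helper is in scope; the registry names and base64 auth values are invented for illustration:
// Usage sketch, assuming MergeDockerConfigs is in scope in package mco.
func exampleMergeDockerConfigs() (string, error) {
	config1 := `{"auths":{"registry.example.com":{"auth":"dXNlcjpwYXNz"}}}`
	config2 := `{"auths":{"quay.example.org":{"auth":"Zm9vOmJhcg=="}}}`
	// The result contains both auth entries; keys present in both inputs take config2's value.
	return MergeDockerConfigs(config1, config2)
}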
file
|
openshift/openshift-tests-private
|
f242c1e6-e8ee-4ab4-b7c2-ab7d553d12e1
|
workloads
|
import exutil "github.com/openshift/openshift-tests-private/test/extended/util"
|
github.com/openshift/openshift-tests-private/test/extended/mco/workloads.go
|
package mco
import exutil "github.com/openshift/openshift-tests-private/test/extended/util"
// Job struct to handle Job resources
type Job struct {
Resource
}
// NewJob constructs a new Job struct
func NewJob(oc *exutil.CLI, namespace, name string) *Job {
return &Job{*NewNamespacedResource(oc, "job", namespace, name)}
}
// GetPods returns the pods triggered by this job
func (j Job) GetPods() ([]Pod, error) {
pl := NewPodList(j.GetOC(), j.GetNamespace())
pl.ByLabel("job-name=" + j.GetName())
return pl.GetAll()
}
// GetFirstPod returns the first pod triggered by the job
func (j Job) GetFirstPod() (*Pod, error) {
pods, err := j.GetPods()
if err != nil {
return nil, err
}
if len(pods) == 0 {
return nil, nil
}
return &(pods[0]), nil
}
// GetActive returns the number of active pods for this job
func (j Job) GetActive() (string, error) {
return j.Get(`{.status.active}`)
}
// GetReady returns the number of ready pods for this job
func (j Job) GetReady() (string, error) {
return j.Get(`{.status.ready}`)
}
// Pod struct to handle Pod resources
type Pod struct {
Resource
}
// PodList struct to handle lists of Pod resources
type PodList struct {
ResourceList
}
// NewPod constructs a new Pod struct
func NewPod(oc *exutil.CLI, namespace, name string) *Pod {
return &Pod{*NewNamespacedResource(oc, "pod", namespace, name)}
}
// NewPodList constructs a new PodList struct to handle all existing Pods
func NewPodList(oc *exutil.CLI, namespace string) *PodList {
return &PodList{*NewNamespacedResourceList(oc, "pod", namespace)}
}
// GetAll returns a []Pod slice with all existing pods
func (pl PodList) GetAll() ([]Pod, error) {
allMResources, err := pl.ResourceList.GetAll()
if err != nil {
return nil, err
}
allMs := make([]Pod, 0, len(allMResources))
for _, mRes := range allMResources {
allMs = append(allMs, *NewPod(pl.oc, mRes.GetNamespace(), mRes.GetName()))
}
return allMs, nil
}
|
package mco
| ||||
function
|
openshift/openshift-tests-private
|
1f91e118-4a83-4810-b707-c6071a871057
|
NewJob
|
['Job']
|
github.com/openshift/openshift-tests-private/test/extended/mco/workloads.go
|
func NewJob(oc *exutil.CLI, namespace, name string) *Job {
return &Job{*NewNamespacedResource(oc, "job", namespace, name)}
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
4b6dd4e1-66bd-4e4e-927f-f859cedc092d
|
GetPods
|
['Job', 'Pod']
|
github.com/openshift/openshift-tests-private/test/extended/mco/workloads.go
|
func (j Job) GetPods() ([]Pod, error) {
pl := NewPodList(j.GetOC(), j.GetNamespace())
pl.ByLabel("job-name=" + j.GetName())
return pl.GetAll()
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
9b7ae7ba-0bec-4e5c-bdc9-37914d3e08d2
|
GetFirstPod
|
['Job', 'Pod']
|
github.com/openshift/openshift-tests-private/test/extended/mco/workloads.go
|
func (j Job) GetFirstPod() (*Pod, error) {
pods, err := j.GetPods()
if err != nil {
return nil, err
}
if len(pods) == 0 {
return nil, nil
}
return &(pods[0]), nil
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
a2484256-f81e-4f79-b589-0db37acddb8a
|
GetActive
|
['Job']
|
github.com/openshift/openshift-tests-private/test/extended/mco/workloads.go
|
func (j Job) GetActive() (string, error) {
return j.Get(`{.status.active}`)
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
53baea38-1cb5-4899-bc6b-d59201b5c69b
|
GetReady
|
['Job']
|
github.com/openshift/openshift-tests-private/test/extended/mco/workloads.go
|
func (j Job) GetReady() (string, error) {
return j.Get(`{.status.ready}`)
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
a7dffe98-6c72-48e9-b543-ebc543f7caa6
|
NewPod
|
['Pod']
|
github.com/openshift/openshift-tests-private/test/extended/mco/workloads.go
|
func NewPod(oc *exutil.CLI, namespace, name string) *Pod {
return &Pod{*NewNamespacedResource(oc, "pod", namespace, name)}
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
e6f176bc-a19b-4f5d-a527-570fb71de1b9
|
NewPodList
|
['PodList']
|
github.com/openshift/openshift-tests-private/test/extended/mco/workloads.go
|
func NewPodList(oc *exutil.CLI, namespace string) *PodList {
return &PodList{*NewNamespacedResourceList(oc, "pod", namespace)}
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
41576733-9151-4a3e-b265-d0a058eb2397
|
GetAll
|
['Pod', 'PodList']
|
github.com/openshift/openshift-tests-private/test/extended/mco/workloads.go
|
func (pl PodList) GetAll() ([]Pod, error) {
allMResources, err := pl.ResourceList.GetAll()
if err != nil {
return nil, err
}
allMs := make([]Pod, 0, len(allMResources))
for _, mRes := range allMResources {
allMs = append(allMs, *NewPod(pl.oc, mRes.GetNamespace(), mRes.GetName()))
}
return allMs, nil
}
|
mco
| ||||
test
|
openshift/openshift-tests-private
|
15ac6efe-5cf2-470b-b66a-14f66366f337
|
monitoring
|
import (
"context"
"fmt"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
package monitoring
import (
"context"
"fmt"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-monitoring] Cluster_Observability parallel monitoring", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("monitor-"+getRandomString(), exutil.KubeConfigPath())
monitoringCM monitoringConfig
monitoringBaseDir string
)
g.BeforeEach(func() {
monitoringBaseDir = exutil.FixturePath("testdata", "monitoring")
monitoringCMTemplate := filepath.Join(monitoringBaseDir, "cluster-monitoring-cm.yaml")
// enable user workload monitoring and load other configurations from cluster-monitoring-config configmap
monitoringCM = monitoringConfig{
name: "cluster-monitoring-config",
namespace: "openshift-monitoring",
enableUserWorkload: true,
template: monitoringCMTemplate,
}
monitoringCM.create(oc)
})
// author: [email protected]
g.It("Author:hongyli-High-49073-Retention size settings for platform", func() {
checkRetention(oc, "openshift-monitoring", "prometheus-k8s", "storage.tsdb.retention.size=10GiB", platformLoadTime)
checkRetention(oc, "openshift-monitoring", "prometheus-k8s", "storage.tsdb.retention.time=45d", 20)
})
// author: [email protected]
g.It("Author:hongyli-High-49514-federate service endpoint and route of platform Prometheus", func() {
var err error
exutil.By("Bind cluster-monitoring-view cluster role to current user")
clusterRoleBindingName := "clusterMonitoringViewFederate"
defer deleteClusterRoleBinding(oc, clusterRoleBindingName)
clusterRoleBinding, err := bindClusterRoleToUser(oc, "cluster-monitoring-view", oc.Username(), clusterRoleBindingName)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Created: %v %v", "ClusterRoleBinding", clusterRoleBinding.Name)
exutil.By("Get token of current user")
token := oc.UserConfig().BearerToken
exutil.By("check federate endpoint service")
checkMetric(oc, "https://prometheus-k8s.openshift-monitoring.svc:9091/federate --data-urlencode 'match[]=prometheus_build_info'", token, "prometheus_build_info", 3*platformLoadTime)
exutil.By("check federate route")
checkRoute(oc, "openshift-monitoring", "prometheus-k8s-federate", token, "match[]=prometheus_build_info", "prometheus_build_info", 3*platformLoadTime)
})
// author: [email protected]
g.It("Author:juzhao-LEVEL0-Medium-49172-Enable validating webhook for AlertmanagerConfig customer resource", func() {
var (
err error
output string
namespace string
invalidAlertmanagerConfig = filepath.Join(monitoringBaseDir, "invalid-alertmanagerconfig.yaml")
validAlertmanagerConfig = filepath.Join(monitoringBaseDir, "valid-alertmanagerconfig.yaml")
)
exutil.By("Get prometheus-operator-admission-webhook deployment")
err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment", "prometheus-operator-admission-webhook", "-n", "openshift-monitoring").Execute()
if err != nil {
e2e.Logf("Unable to get deployment prometheus-operator-admission-webhook.")
}
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
namespace = oc.Namespace()
exutil.By("confirm alertmanagerconfigs CRD exists")
err = wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 180*time.Second, false, func(context.Context) (bool, error) {
alertmanagerconfigs, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("customresourcedefinitions", "alertmanagerconfigs.monitoring.coreos.com").Output()
if err != nil || strings.Contains(alertmanagerconfigs, "not found") {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, "alertmanagerconfigs CRD does not exist")
exutil.By("Create invalid AlertmanagerConfig, should throw out error")
output, err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", invalidAlertmanagerConfig, "-n", namespace).Output()
o.Expect(err).To(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("The AlertmanagerConfig \"invalid-test-config\" is invalid"))
exutil.By("Create valid AlertmanagerConfig, should not have error")
output, err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", validAlertmanagerConfig, "-n", namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("valid-test-config created"))
})
//author: [email protected]
g.It("Author:tagao-Medium-42800-Allow configuration of the log level for Alertmanager in the CMO configmap", func() {
exutil.By("Check alertmanager container logs")
exutil.WaitAndGetSpecificPodLogs(oc, "openshift-monitoring", "alertmanager", "alertmanager-main-0", "level=debug")
})
// author: [email protected]
g.It("Author:juzhao-Medium-43748-Ensure label namespace exists on all alerts", func() {
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("check alerts, should have label namespace exists on all alerts")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertname="Watchdog"}'`, token, `"namespace":"openshift-monitoring"`, 2*platformLoadTime)
})
//author: [email protected]
g.It("Author:tagao-Medium-47307-Add external label of origin to platform alerts", func() {
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("check alerts, could see the `openshift_io_alert_source` field for in-cluster alerts")
checkMetric(oc, "https://alertmanager-main.openshift-monitoring.svc:9094/api/v2/alerts", token, `"openshift_io_alert_source":"platform"`, 2*platformLoadTime)
})
//author: [email protected]
g.It("Author:tagao-Medium-45163-Show labels for pods/nodes/namespaces/PV/PVC/PDB in metrics", func() {
var (
ns string
helloPodPvc = filepath.Join(monitoringBaseDir, "helloPodPvc.yaml")
)
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("check if the cluster have default storage class")
checkSC, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("sc", "--no-headers").Output()
e2e.Logf("storage class: %s", checkSC)
hasSC := false
if strings.Contains(checkSC, "default") {
hasSC = true
exutil.By("create project ns then attach pv/pvc")
oc.SetupProject()
ns = oc.Namespace()
createResourceFromYaml(oc, ns, helloPodPvc)
}
exutil.By("Check labels for pod")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=kube_pod_labels{pod="alertmanager-main-0"}'`, token, `"label_statefulset_kubernetes_io_pod_name"`, uwmLoadTime)
exutil.By("Check labels for node")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=kube_node_labels'`, token, `"label_kubernetes_io_hostname"`, uwmLoadTime)
exutil.By("Check labels for namespace")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=kube_namespace_labels{namespace="openshift-monitoring"}'`, token, `"label_kubernetes_io_metadata_name"`, uwmLoadTime)
exutil.By("Check labels for PDB")
checkPDB, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pdb", "thanos-querier-pdb", "-n", "openshift-monitoring").Output()
if !strings.Contains(checkPDB, `"thanos-querier-pdb" not found`) {
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=kube_poddisruptionbudget_labels{poddisruptionbudget="thanos-querier-pdb"}'`, token, `"label_app_kubernetes_io_name"`, uwmLoadTime)
}
exutil.By("Check labels for PV/PVC if need")
if hasSC {
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=kube_persistentvolume_labels'`, token, `"persistentvolume"`, 2*uwmLoadTime)
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=kube_persistentvolumeclaim_labels'`, token, `"persistentvolumeclaim"`, 2*uwmLoadTime)
}
})
//author: [email protected]
g.It("Author:tagao-Medium-48432-Allow OpenShift users to configure request logging for Thanos Querier query endpoint", func() {
exutil.By("check thanos-querier pods are normal and able to see the request.logging-config setting")
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
cmd := "-ojsonpath={.spec.template.spec.containers[?(@.name==\"thanos-query\")].args}"
checkYamlconfig(oc, "openshift-monitoring", "deploy", "thanos-querier", cmd, "request.logging-config", true)
//thanos-querier pod names will change when the cm is modified, so the pods may not have restarted yet during the first check
exutil.By("double confirm thanos-querier pods are ready")
podList, err := exutil.GetAllPodsWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/instance=thanos-querier")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range podList {
exutil.AssertPodToBeReady(oc, pod, "openshift-monitoring")
}
exutil.By("query with thanos-querier svc")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertname="Watchdog"}'`, token, `Watchdog`, 3*uwmLoadTime)
exutil.By("check from thanos-querier logs")
//oc -n openshift-monitoring logs -l app.kubernetes.io/instance=thanos-querier -c thanos-query --tail=-1
checkLogWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/instance=thanos-querier", "thanos-query", `Watchdog`, true)
})
// author: [email protected]
g.It("Author:juzhao-Low-43038-Should not have error for loading OpenAPI spec for v1beta1.metrics.k8s.io", func() {
var (
searchString string
result string
)
searchString = "loading OpenAPI spec for \"v1beta1.metrics.k8s.io\" failed with:"
podList, err := exutil.GetAllPodsWithLabel(oc, "openshift-kube-apiserver", "app=openshift-kube-apiserver")
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("kube-apiserver Pods: %v", podList)
exutil.By("check the kube-apiserver logs, should not have error for v1beta1.metrics.k8s.io")
for _, pod := range podList {
exutil.AssertPodToBeReady(oc, pod, "openshift-kube-apiserver")
result, _ = exutil.GetSpecificPodLogs(oc, "openshift-kube-apiserver", "kube-apiserver", pod, searchString)
e2e.Logf("output result in logs: %v", result)
o.Expect(len(result) == 0).To(o.BeTrue(), "found the error logs which is unexpected")
}
})
//author: [email protected]
g.It("Author:tagao-Low-55670-Prometheus should not collecting error messages for completed pods [Serial]", func() {
exutil.By("delete user-workload-monitoring-config/cluster-monitoring-config configmap at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("check pod conditioning in openshift-kube-scheduler")
podStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-kube-scheduler").Output()
e2e.Logf("kube-scheduler Pods:\n%s", podStatus)
o.Expect(podStatus).To(o.ContainSubstring("Completed"))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check metrics-server pod logs")
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
output, logsErr := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-l", "app.kubernetes.io/name=metrics-server", "-c", "metrics-server", "--tail=-1", "-n", "openshift-monitoring").Output()
o.Expect(logsErr).NotTo(o.HaveOccurred())
if strings.Contains(output, "unable to fetch CPU metrics for pod openshift-kube-scheduler/") {
e2e.Logf("output result in logs:\n%s", output)
e2e.Failf("found unexpected logs")
}
})
//author: [email protected]
g.It("Author:tagao-LEVEL0-Medium-55767-Missing metrics in kube-state-metrics", func() {
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("check kube-state-metrics metrics, the following metrics should be visible")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/label/__name__/values`, token, `"kube_pod_container_status_terminated_reason"`, uwmLoadTime)
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/label/__name__/values`, token, `"kube_pod_init_container_status_terminated_reason"`, uwmLoadTime)
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/label/__name__/values`, token, `"kube_pod_status_scheduled_time"`, uwmLoadTime)
})
// author: [email protected]
g.It("Author:tagao-High-56168-PreChkUpgrade-NonPreRelease-Prometheus never sees endpoint propagation of a deleted pod", func() {
var (
ns = "56168-upgrade-ns"
exampleApp = filepath.Join(monitoringBaseDir, "example-app.yaml")
roleBinding = filepath.Join(monitoringBaseDir, "sa-prometheus-k8s-access.yaml")
)
exutil.By("Create example app")
oc.AsAdmin().WithoutNamespace().Run("create").Args("namespace", ns).Execute()
createResourceFromYaml(oc, ns, exampleApp)
exutil.AssertAllPodsToBeReady(oc, ns)
exutil.By("add role and role binding for example app")
createResourceFromYaml(oc, ns, roleBinding)
exutil.By("label namespace")
oc.AsAdmin().WithoutNamespace().Run("label").Args("namespace", ns, "openshift.io/cluster-monitoring=true").Execute()
exutil.By("check target is up")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/targets`, token, "up", 2*uwmLoadTime)
})
// author: [email protected]
g.It("Author:tagao-High-56168-PstChkUpgrade-NonPreRelease-Prometheus never sees endpoint propagation of a deleted pod", func() {
exutil.By("get the ns name in PreChkUpgrade")
ns := "56168-upgrade-ns"
exutil.By("delete related resource at the end of case")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("project", ns).Execute()
exutil.By("delete example app deployment")
deleteApp, _ := oc.AsAdmin().WithoutNamespace().Run("delete").Args("deploy", "prometheus-example-app", "-n", ns).Output()
o.Expect(deleteApp).To(o.ContainSubstring(`"prometheus-example-app" deleted`))
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("check metric up==0 under the test project, return null")
checkMetric(oc, "https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=up{namespace=\"56168-upgrade-ns\"}==0'", token, `"result":[]`, 2*uwmLoadTime)
exutil.By("check no alert 'TargetDown'")
checkAlertNotExist(oc, "https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{namespace=\"56168-upgrade-ns\"}'", token, "TargetDown", uwmLoadTime)
})
// author: [email protected]
g.It("Author:tagao-LEVEL0-Medium-57254-oc adm top node/pod output should not give negative numbers", func() {
exutil.By("check on node")
checkNode, err := exec.Command("bash", "-c", `oc adm top node | awk '{print $2,$3,$4,$5}'`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(checkNode).NotTo(o.ContainSubstring("-"))
exutil.By("check on pod under specific namespace")
checkNs, err := exec.Command("bash", "-c", `oc -n openshift-monitoring adm top pod | awk '{print $2,$3}'`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(checkNs).NotTo(o.ContainSubstring("-"))
})
// author: [email protected]
g.It("ConnectedOnly-Author:tagao-LEVEL0-Medium-55696-add telemeter alert TelemeterClientFailures", func() {
exutil.By("check telemetry prometheusrule exists")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("prometheusrules", "telemetry", "-n", "openshift-monitoring").Output()
// Error from server (NotFound): prometheusrules.monitoring.coreos.com "telemetry" not found
if strings.Contains(output, `"telemetry" not found`) {
e2e.Logf("output: %s", output)
g.Skip("this env does not have telemetry prometheusrule, skip the case")
}
exutil.By("check TelemeterClientFailures alert is added")
output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("prometheusrules", "telemetry", "-ojsonpath={.spec.groups}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("TelemeterClientFailures"))
})
// author: [email protected]
g.It("Author:juzhao-Medium-62092-Don't fire NodeFilesystemAlmostOutOfSpace alert for certain tmpfs mount points", func() {
exutil.By("check NodeFilesystemAlmostOutOfSpace alert from node-exporter-rules prometheusrules")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("prometheusrules", "node-exporter-rules", `-ojsonpath={.spec.groups[*].rules[?(@.alert=="NodeFilesystemAlmostOutOfSpace")].expr}`, "-n", "openshift-monitoring").Output()
e2e.Logf("NodeFilesystemAlmostOutOfSpace alert expr: %v", output)
exutil.By("mountpoint /var/lib/ibmc-s3fs.* is excluded")
o.Expect(output).To(o.ContainSubstring(`mountpoint!~"/var/lib/ibmc-s3fs.*"`))
})
// author: [email protected]
g.It("Author:tagao-Medium-48350-create alert-routing-edit role to allow end users to manage alerting CR", func() {
var (
alertManagerConfig = filepath.Join(monitoringBaseDir, "valid-alertmanagerconfig.yaml")
)
exutil.By("check clusterrole alert-routing-edit exists")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterrole").Output()
o.Expect(strings.Contains(output, "alert-routing-edit")).To(o.BeTrue())
exutil.By("create project, add alert-routing-edit RoleBinding to specific user")
oc.SetupProject()
ns := oc.Namespace()
err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-role-to-user", "-n", ns, "alert-routing-edit", oc.Username()).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("create AlertmanagerConfig under the project")
createResourceFromYaml(oc, ns, alertManagerConfig)
exutil.By("check AlertmanagerConfig is created")
output, _ = oc.WithoutNamespace().Run("get").Args("AlertmanagerConfig", "-n", ns).Output()
o.Expect(output).To(o.ContainSubstring("valid-test-config"))
exutil.By("the user should able to change AlertmanagerConfig")
err = oc.WithoutNamespace().Run("patch").Args("AlertmanagerConfig", "valid-test-config", "-p", `{"spec":{"receivers":[{"name":"webhook","webhookConfigs":[{"url":"https://test.io/push"}]}]}}`, "--type=merge", "-n", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check AlertmanagerConfig is updated")
output, _ = oc.WithoutNamespace().Run("get").Args("AlertmanagerConfig", "valid-test-config", "-ojsonpath={.spec.receivers}", "-n", ns).Output()
o.Expect(output).To(o.ContainSubstring("https://test.io/push"))
exutil.By("the user should able to delete AlertmanagerConfig")
err = oc.WithoutNamespace().Run("delete").Args("AlertmanagerConfig", "valid-test-config", "-n", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check AlertmanagerConfig is deleted")
output, _ = oc.WithoutNamespace().Run("get").Args("AlertmanagerConfig", "-n", ns).Output()
o.Expect(output).NotTo(o.ContainSubstring("valid-test-config"))
})
// author: [email protected]
g.It("Author:juzhao-Low-62957-Prometheus and Alertmanager should configure ExternalURL correctly", func() {
exutil.By("skip the case if there is not console operator enabled")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperators", "console").Output()
// Error from server (NotFound): clusteroperators.config.openshift.io "console" not found
if strings.Contains(output, `"console" not found`) {
e2e.Logf("output: %s", output)
g.Skip("this cluster does not have console clusteroperator, skip the case")
}
exutil.By("get console route")
consoleURL, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("route", "console", `-ojsonpath={.spec.host}`, "-n", "openshift-console").Output()
e2e.Logf("console route is: %v", consoleURL)
exutil.By("get externalUrl for alertmanager main")
alertExternalUrl, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("alertmanager", "main", `-ojsonpath={.spec.externalUrl}`, "-n", "openshift-monitoring").Output()
e2e.Logf("alertmanager main externalUrl is: %v", alertExternalUrl)
o.Expect(alertExternalUrl).To(o.ContainSubstring("https://" + consoleURL))
exutil.By("get externalUrl for prometheus k8s")
prometheusExternalUrl, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("prometheus", "k8s", `-ojsonpath={.spec.externalUrl}`, "-n", "openshift-monitoring").Output()
e2e.Logf("prometheus k8s externalUrl is: %v", prometheusExternalUrl)
o.Expect(prometheusExternalUrl).To(o.ContainSubstring("https://" + consoleURL))
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("check from alertmanager API, the generatorURL should include https://${consoleURL}")
checkMetric(oc, `https://alertmanager-main.openshift-monitoring.svc:9094/api/v2/alerts?&filter={alertname="Watchdog"}`, token, `"generatorURL":"https://`+consoleURL, 2*platformLoadTime)
})
// author: [email protected]
g.It("Author:tagao-Medium-48942-validation for scrapeTimeout and relabel configs", func() {
var (
invalidServiceMonitor = filepath.Join(monitoringBaseDir, "invalid-ServiceMonitor.yaml")
)
exutil.By("delete test ServiceMonitor at the end of case")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("servicemonitor", "console-test-monitoring", "-n", "openshift-monitoring").Execute()
exutil.By("create one ServiceMonitor, set scrapeTimeout bigger than scrapeInterval, and no targetLabel setting")
createResourceFromYaml(oc, "openshift-monitoring", invalidServiceMonitor)
exutil.By("able to see error in prometheus-operator logs")
checkLogWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/name=prometheus-operator", "prometheus-operator", `error="scrapeTimeout \"120s\" greater than scrapeInterval \"30s\""`, true)
exutil.By("check the configuration is not loaded to prometheus")
checkPrometheusConfig(oc, "openshift-monitoring", "prometheus-k8s-0", `serviceMonitor/openshift-monitoring/console-test-monitoring/0`, false)
exutil.By("edit ServiceMonitor, and set value for scrapeTimeout less than scrapeInterval")
//oc patch servicemonitor console-test-monitoring --type='json' -p='[{"op": "replace", "path": "/spec/endpoints/0/scrapeTimeout", "value":"20s"}]' -n openshift-monitoring
patchConfig := `[{"op": "replace", "path": "/spec/endpoints/0/scrapeTimeout", "value":"20s"}]`
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("servicemonitor", "console-test-monitoring", "-p", patchConfig, "--type=json", "-n", "openshift-monitoring").Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By("able to see error for missing targetLabel in prometheus-operator logs")
checkLogWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/name=prometheus-operator", "prometheus-operator", `relabel configuration for replace action needs targetLabel value`, true)
exutil.By("add targetLabel to ServiceMonitor")
//oc -n openshift-monitoring patch servicemonitor console-test-monitoring --type='json' -p='[{"op": "add", "path": "/spec/endpoints/0/relabelings/0/targetLabel", "value": "namespace"}]'
patchConfig = `[{"op": "add", "path": "/spec/endpoints/0/relabelings/0/targetLabel", "value": "namespace"}]`
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("servicemonitor", "console-test-monitoring", "-p", patchConfig, "--type=json", "-n", "openshift-monitoring").Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By("check the configuration loaded to prometheus")
checkPrometheusConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "serviceMonitor/openshift-monitoring/console-test-monitoring/0", true)
})
// author: [email protected]
g.It("Author:juzhao-Medium-62636-Graduate alert overrides and alert relabelings to GA", func() {
var (
alertingRule = filepath.Join(monitoringBaseDir, "alertingRule.yaml")
alertRelabelConfig = filepath.Join(monitoringBaseDir, "alertRelabelConfig.yaml")
)
exutil.By("delete the created AlertingRule/AlertRelabelConfig at the end of the case")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("AlertingRule", "monitoring-example", "-n", "openshift-monitoring").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("AlertRelabelConfig", "monitoring-watchdog", "-n", "openshift-monitoring").Execute()
exutil.By("check AlertingRule/AlertRelabelConfig apiVersion is v1")
_, explainErr := oc.WithoutNamespace().AsAdmin().Run("explain").Args("AlertingRule", "--api-version=monitoring.openshift.io/v1").Output()
o.Expect(explainErr).NotTo(o.HaveOccurred())
_, explainErr = oc.WithoutNamespace().AsAdmin().Run("explain").Args("AlertRelabelConfig", "--api-version=monitoring.openshift.io/v1").Output()
o.Expect(explainErr).NotTo(o.HaveOccurred())
exutil.By("create AlertingRule/AlertRelabelConfig under openshift-monitoring")
createResourceFromYaml(oc, "openshift-monitoring", alertingRule)
createResourceFromYaml(oc, "openshift-monitoring", alertRelabelConfig)
exutil.By("check AlertingRule/AlertRelabelConfig are created")
output, _ := oc.WithoutNamespace().Run("get").Args("AlertingRule/monitoring-example", "-ojsonpath={.metadata.name}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("monitoring-example"))
output, _ = oc.WithoutNamespace().Run("get").Args("AlertRelabelConfig/monitoring-watchdog", "-ojsonpath={.metadata.name}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("monitoring-watchdog"))
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("check the alert defined in AlertingRule could be found in thanos-querier API")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertname="ExampleAlert"}'`, token, `"alertname":"ExampleAlert"`, 2*platformLoadTime)
exutil.By("Watchdog alert, the alert label is changed from \"severity\":\"none\" to \"severity\":\"critical\" in alertmanager API")
checkMetric(oc, `https://alertmanager-main.openshift-monitoring.svc:9094/api/v2/alerts?&filter={alertname="Watchdog"}`, token, `"severity":"critical"`, 2*platformLoadTime)
})
// author: [email protected]
g.It("Author:tagao-Low-67008-node-exporter: disable btrfs collector", func() {
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("should not see btrfs collector related metrics")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="btrfs"}'`, token, "\"result\":[]", uwmLoadTime)
exutil.By("check btrfs collector is disabled by default")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("no-collector.btrfs"))
})
// author: [email protected]
g.It("Author:tagao-LEVEL0-Medium-68292-Limit the value of GOMAXPROCS on node-exporter to 4", func() {
exutil.By("check the gomaxprocs value in logs")
// % oc -n openshift-monitoring logs -l app.kubernetes.io/name=node-exporter --tail=-1 -c node-exporter | grep -o 'gomaxprocs=[0-9]*' | uniq | cut -d= -f2
nodeExporterLogs, errLogs := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-l", "app.kubernetes.io/name=node-exporter", "--tail=-1", "-c", "node-exporter", "-n", "openshift-monitoring").OutputToFile("OCP-68292_nodeExporter.log")
o.Expect(errLogs).NotTo(o.HaveOccurred())
cmd := fmt.Sprintf(`cat %v | grep -o '%s' | uniq | cut -d= -f2`, nodeExporterLogs, "gomaxprocs=[0-9]*")
gomaxprocsValue, err := exec.Command("bash", "-c", cmd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("gomaxprocsValue output: %s", gomaxprocsValue)
o.Expect(strings.TrimSpace(string(gomaxprocsValue))).NotTo(o.BeEmpty(), "no gomaxprocs value found in node-exporter logs")
// the grep may return one value per node-exporter pod; trim whitespace before converting so Atoi does not silently fail
for _, value := range strings.Fields(string(gomaxprocsValue)) {
	gomaxprocsNum, convErr := strconv.Atoi(value)
	o.Expect(convErr).NotTo(o.HaveOccurred())
	o.Expect(gomaxprocsNum).To(o.BeNumerically("<=", 4))
}
})
// author: [email protected]
g.It("Author:juzhao-Low-68958-node_exporter shouldn't collect metrics for Calico Virtual NICs", func() {
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("should not see metrics for Calico Virtual NICs")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_network_info{device=~"cali.*"}'`, token, "\"result\":[]", uwmLoadTime)
})
// author: [email protected]
g.It("Author:tagao-Medium-69087-Replace OAuth-proxy container with kube-rbac-proxy in Thanos-Querier pod", func() {
exutil.By("check role added")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("role", "cluster-monitoring-metrics-api", "-n", "openshift-monitoring").Output()
o.Expect(output).NotTo(o.ContainSubstring("NotFound"))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check cluster role added")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterRole", "cluster-monitoring-view", "-ojsonpath={.rules}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("monitoring.coreos.com"))
o.Expect(err).NotTo(o.HaveOccurred())
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterRole", "prometheus-k8s", "-ojsonpath={.rules[?(\"monitoring.coreos.com\")]}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("monitoring.coreos.com"))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check thanos-querier deployment")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deploy", "thanos-querier", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"kube-rbac-proxy-web\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("kube-rbac-proxy/config.yaml"))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check thanos-querier secret")
// should see `thanos-querier-kube-rbac-proxy-web` is added, and `thanos-querier-oauth-cookie` is removed
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", "thanos-querier-kube-rbac-proxy-web", "-n", "openshift-monitoring").Output()
o.Expect(output).NotTo(o.ContainSubstring("NotFound"))
o.Expect(err).NotTo(o.HaveOccurred())
output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", "thanos-querier-oauth-cookie", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("NotFound"))
exutil.By("Get token of current user")
token := oc.UserConfig().BearerToken
exutil.By("Get route of thanos-querier")
host, hostErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("route", "thanos-querier", "-ojsonpath={.spec.host}", "-n", "openshift-monitoring").Output()
o.Expect(hostErr).NotTo(o.HaveOccurred())
exutil.By("test role can NOT access to ThanosQuerier")
// % curl -H "Authorization: Bearer $token" -k "https://$host/api/v1/query?" --data-urlencode 'query=up{namespace="openshift-monitoring"}'
checkMetric(oc, "https://"+host+"/api/v1/query? --data-urlencode 'query=up{namespace=\"openshift-monitoring\"}'", token, "Forbidden", 2*platformLoadTime)
exutil.By("add role access to ThanosQuerier")
admErr := oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-role-to-user", "--role-namespace=openshift-monitoring", "-n", "openshift-monitoring", "cluster-monitoring-metrics-api", oc.Username()).Execute()
o.Expect(admErr).NotTo(o.HaveOccurred())
exutil.By("test role access to ThanosQuerier")
// % curl -H "Authorization: Bearer $token" -k "https://$host/api/v1/query?" --data-urlencode 'query=up{namespace="openshift-monitoring"}'
checkMetric(oc, "https://"+host+"/api/v1/query? --data-urlencode 'query=up{namespace=\"openshift-monitoring\"}'", token, "up", 2*platformLoadTime)
})
// author: [email protected]
g.It("Author:juzhao-Medium-69924-Set scrape.timestamp tolerance for prometheus", func() {
exutil.By("confirm in-cluster prometheus is created")
err := wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 180*time.Second, false, func(context.Context) (bool, error) {
prometheus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("prometheus", "k8s", "-n", "openshift-monitoring").Output()
if err != nil || strings.Contains(prometheus, "not found") {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, "in-cluster prometheus is not created")
exutil.By("check in-cluster prometheus scrape.timestamp tolerance")
cmd := `-ojsonpath={.spec.additionalArgs[?(@.name=="scrape.timestamp-tolerance")]}`
checkYamlconfig(oc, "openshift-monitoring", "prometheus", "k8s", cmd, `"value":"15ms"`, true)
//check settings in prometheus pods
podNames, err := exutil.GetAllPodsWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/name=prometheus")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range podNames {
cmd := "-ojsonpath={.spec.containers[?(@.name==\"prometheus\")].args}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, `--scrape.timestamp-tolerance=15ms`, true)
}
})
// author: [email protected]
g.It("Author:juzhao-Medium-70051-Adjust NodeClock alerting rules to be inactive when the PTP operator is installed", func() {
exutil.By("check NodeClockSkewDetected alert expr")
cmd := "-ojsonpath={.spec.groups[*].rules[?(@.alert==\"NodeClockSkewDetected\")].expr}"
checkYamlconfig(oc, "openshift-monitoring", "prometheusrules", "node-exporter-rules", cmd, `absent(up{job="ptp-monitor-service"})`, true)
exutil.By("check NodeClockNotSynchronising alert expr")
cmd = "-ojsonpath={.spec.groups[*].rules[?(@.alert==\"NodeClockNotSynchronising\")].expr}"
checkYamlconfig(oc, "openshift-monitoring", "prometheusrules", "node-exporter-rules", cmd, `absent(up{job="ptp-monitor-service"})`, true)
})
// author: [email protected]
g.It("Author:juzhao-Medium-69927-Allow to query alerts of application namespaces as an application user from command line", func() {
_, err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-user", "cluster-admin", oc.Username()).Output()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-user", "cluster-admin", oc.Username()).Execute()
podNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", "openshift-monitoring", "-l", "app.kubernetes.io/name=prometheus", "--ignore-not-found", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// double check prometheus pods are Running
for _, pod := range strings.Fields(podNames) {
assertPodToBeReady(oc, pod, "openshift-monitoring")
}
podNames, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", "openshift-monitoring", "-l", "app.kubernetes.io/name=thanos-query", "--ignore-not-found", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// double check thanos-querier pods are Running
for _, pod := range strings.Fields(podNames) {
assertPodToBeReady(oc, pod, "openshift-monitoring")
}
exutil.By("get user API token")
token, _ := oc.Run("whoami").Args("-t").Output()
exutil.By("Run port-forward command")
cmd, _, _, err := oc.AsAdmin().WithoutNamespace().Run("port-forward").Args("-n", "openshift-monitoring", "service/thanos-querier", "9093:9093").Background()
o.Expect(err).NotTo(o.HaveOccurred())
defer cmd.Process.Kill()
output, err := exec.Command("bash", "-c", "ps -ef | grep 9093").Output()
e2e.Logf("output is: %s", output)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("curl without namespace parameter should return Bad Request")
curlcmd := "curl -G -k -s -H \"Authorization:Bearer " + token + "\" " + "https://127.0.0.1:9093/api/v1/alerts"
err = wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 30*time.Second, false, func(context.Context) (bool, error) {
output, err := exec.Command("bash", "-c", curlcmd).Output()
e2e.Logf("output is: %s", output)
if err != nil {
e2e.Logf("failed to execute the curl: %s. Trying again", err)
return false, nil
}
if matched, _ := regexp.MatchString("Bad Request", string(output)); matched {
e2e.Logf("Bad Request. The request or configuration is malformed\n")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "failed to curl without namespace parameter")
exutil.By("curl with namespace parameter should return alerts")
err = wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 30*time.Second, false, func(context.Context) (bool, error) {
output, err := exec.Command("bash", "-c", curlcmd+"?namespace=openshift-monitoring").Output()
e2e.Logf("output is: %s", output)
if err != nil {
e2e.Logf("failed to execute the curl: %s. Trying again", err)
return false, nil
}
if matched, _ := regexp.MatchString(`"alertname":"Watchdog"`, string(output)); matched {
e2e.Logf("curl with namespace parameter returns Watchdog alert\n")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "Cannot get result with namespace parameter")
})
// author: [email protected]
g.It("Author:tagao-Medium-69195-Replace OAuth-proxy container with Kube-RBAC-proxy in Prometheus pod", func() {
exutil.By("check prometheus-k8s-kube-rbac-proxy-web added")
checkSecret, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", "prometheus-k8s-kube-rbac-proxy-web", "-n", "openshift-monitoring").Output()
o.Expect(checkSecret).NotTo(o.ContainSubstring("not found"))
exutil.By("check secret prometheus-k8s-proxy removed")
checkSecret, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", "prometheus-k8s-proxy", "-n", "openshift-monitoring").Output()
o.Expect(checkSecret).To(o.ContainSubstring("not found"))
exutil.By("check prometheus k8s configs, kube-rbac-proxy-web related configs should exist")
checkPrometheusK8s, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("prometheus", "k8s", "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy-web\")].ports}", "-n", "openshift-monitoring").Output()
o.Expect(checkPrometheusK8s).To(o.ContainSubstring("9091"))
o.Expect(checkPrometheusK8s).To(o.ContainSubstring("web"))
checkPrometheusK8s, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("prometheus", "k8s", "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy-web\")].volumeMounts}", "-n", "openshift-monitoring").Output()
o.Expect(checkPrometheusK8s).To(o.ContainSubstring("secret-prometheus-k8s-kube-rbac-proxy-web"))
checkPrometheusK8s, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("prometheus", "k8s", "-ojsonpath={.spec.secrets}", "-n", "openshift-monitoring").Output()
o.Expect(checkPrometheusK8s).To(o.ContainSubstring("prometheus-k8s-kube-rbac-proxy-web"))
exutil.By("check prometheus k8s pods, prometheus-proxy container is removed")
checkPO, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "prometheus-k8s-0", "-ojsonpath={.spec.containers[*].name}", "-n", "openshift-monitoring").Output()
o.Expect(checkPO).NotTo(o.ContainSubstring("prometheus-proxy"))
exutil.By("check prometheus-k8s servicemonitor, port should be keep at metrics")
checkSM, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("ServiceMonitor", "prometheus-k8s", "-ojsonpath={.spec.endpoints[]}", "-n", "openshift-monitoring").Output()
o.Expect(checkSM).To(o.ContainSubstring(`"port":"metrics"`))
exutil.By("check telemeter-client deploy")
checkTL, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("deploy", "telemeter-client", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"telemeter-client\")].env[?(@.name==\"FROM\")]}", "-n", "openshift-monitoring").Output()
if !strings.Contains(checkTL, `"telemeter-client" not found`) {
o.Expect(checkTL).To(o.ContainSubstring(`"value":"https://prometheus-k8s.openshift-monitoring.svc:9091"`))
}
exutil.By("check secret thanos-querier-kube-rbac-proxy-metrics")
checkSecret, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", "thanos-querier-kube-rbac-proxy-metrics", "-ojsonpath={.metadata.labels}", "-n", "openshift-monitoring").Output()
o.Expect(checkSecret).To(o.ContainSubstring(`"app.kubernetes.io/component":"query-layer"`))
o.Expect(checkSecret).To(o.ContainSubstring(`"app.kubernetes.io/instance":"thanos-querier"`))
exutil.By("check secret thanos-querier-kube-rbac-proxy-web")
checkSecret, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", "thanos-querier-kube-rbac-proxy-web", "-ojsonpath={.metadata.labels}", "-n", "openshift-monitoring").Output()
o.Expect(checkSecret).To(o.ContainSubstring(`"app.kubernetes.io/component":"query-layer"`))
o.Expect(checkSecret).To(o.ContainSubstring(`"app.kubernetes.io/instance":"thanos-querier"`))
exutil.By("test role access to prometheus-k8s")
exutil.By("Get token of current user")
token := oc.UserConfig().BearerToken
exutil.By("Get route of prometheus-k8s")
host, hostErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("route", "prometheus-k8s", "-ojsonpath={.spec.host}", "-n", "openshift-monitoring").Output()
o.Expect(hostErr).NotTo(o.HaveOccurred())
exutil.By("test role can NOT access to prometheus-k8s")
// % curl -H "Authorization: Bearer $token" -k "https://$host/api/v1/query?" --data-urlencode 'query=up{namespace="openshift-monitoring"}'
checkMetric(oc, "https://"+host+"/api/v1/query? --data-urlencode 'query=up{namespace=\"openshift-monitoring\"}'", token, "Forbidden", 2*platformLoadTime)
exutil.By("add role access to prometheus-k8s")
admErr := oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-role-to-user", "--role-namespace=openshift-monitoring", "-n", "openshift-monitoring", "cluster-monitoring-metrics-api", oc.Username()).Execute()
o.Expect(admErr).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-role-from-user", "--role-namespace=openshift-monitoring", "-n", "openshift-monitoring", "cluster-monitoring-metrics-api", oc.Username()).Execute()
exutil.By("test role access to prometheus-k8s")
// % curl -H "Authorization: Bearer $token" -k "https://$host/api/v1/query?" --data-urlencode 'query=up{namespace="openshift-monitoring"}'
checkMetric(oc, "https://"+host+"/api/v1/query? --data-urlencode 'query=up{namespace=\"openshift-monitoring\"}'", token, "up", 2*platformLoadTime)
})
// author: [email protected]
g.It("Author:tagao-Medium-72560-Replace oauth-proxy container with kube-rbac-proxy in Alertmanager pods", func() {
exutil.By("check new configs added to alertmanager main")
checkAlertmanager, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("alertmanager", "main", "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy-web\")]}", "-n", "openshift-monitoring").Output()
o.Expect(checkAlertmanager).To(o.ContainSubstring(`"--secure-listen-address=0.0.0.0:9095"`))
o.Expect(checkAlertmanager).To(o.ContainSubstring(`"--upstream=http://127.0.0.1:9093"`))
o.Expect(checkAlertmanager).To(o.ContainSubstring(`"--config-file=/etc/kube-rbac-proxy/config.yaml"`))
o.Expect(checkAlertmanager).To(o.ContainSubstring(`"name":"kube-rbac-proxy-web"`))
o.Expect(checkAlertmanager).To(o.ContainSubstring(`"mountPath":"/etc/kube-rbac-proxy"`))
o.Expect(checkAlertmanager).To(o.ContainSubstring(`"name":"secret-alertmanager-kube-rbac-proxy-web"`))
exutil.By("check new secret added and old one removed")
checkSecret, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", "alertmanager-kube-rbac-proxy-web", "-n", "openshift-monitoring").Output()
o.Expect(checkSecret).NotTo(o.ContainSubstring("not found"))
checkSecret, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", "alertmanager-main-proxy", "-n", "openshift-monitoring").Output()
o.Expect(checkSecret).To(o.ContainSubstring("not found"))
exutil.By("check alertmanager pods, alertmanager-proxy container is removed")
checkPO, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "alertmanager-main-0", "-ojsonpath={.spec.containers[*].name}", "-n", "openshift-monitoring").Output()
o.Expect(checkPO).NotTo(o.ContainSubstring("alertmanager-proxy"))
exutil.By("check role, monitoring-alertmanager-edit add new resourceNames")
checkRole, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("role", "monitoring-alertmanager-edit", "-ojsonpath={.rules}", "-n", "openshift-monitoring").Output()
o.Expect(checkRole).To(o.ContainSubstring(`"resourceNames":["main"]`))
o.Expect(checkRole).To(o.ContainSubstring(`"resources":["alertmanagers/api"]`))
o.Expect(checkRole).To(o.ContainSubstring(`"verbs":["*"]`))
exutil.By("test user access to alertmanager")
exutil.By("Get token of current user")
token := oc.UserConfig().BearerToken
exutil.By("Get route of alertmanager-main")
host, hostErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("route", "alertmanager-main", "-ojsonpath={.spec.host}", "-n", "openshift-monitoring").Output()
o.Expect(hostErr).NotTo(o.HaveOccurred())
exutil.By("test role can NOT access to alertmanager")
// % curl -H "Authorization: Bearer $TOKEN" -k "https://$HOST/api/v2/receivers"
checkMetric(oc, "https://"+host+"/api/v2/receivers", token, "Forbidden", 2*platformLoadTime)
exutil.By("add role access to alertmanager")
admErr := oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-role-to-user", "--role-namespace=openshift-monitoring", "-n", "openshift-monitoring", "monitoring-alertmanager-edit", oc.Username()).Execute()
o.Expect(admErr).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-role-from-user", "--role-namespace=openshift-monitoring", "-n", "openshift-monitoring", "monitoring-alertmanager-edit", oc.Username()).Execute()
exutil.By("test role access to alertmanager")
// % curl -H "Authorization: Bearer $TOKEN" -k "https://$HOST/api/v2/receivers"
checkMetric(oc, "https://"+host+"/api/v2/receivers", token, `"name":"Watchdog"`, 2*platformLoadTime)
})
// author: [email protected]
g.It("Author:juzhao-Medium-73294-add role.rbac.authorization.k8s.io/monitoring-alertmanager-view", func() {
exutil.By("Check monitoring-alertmanager-view role is created")
err := oc.AsAdmin().WithoutNamespace().Run("get").Args("role", "monitoring-alertmanager-view", "-n", "openshift-monitoring").Execute()
if err != nil {
e2e.Logf("Unable to get role monitoring-alertmanager-view.")
}
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Bind monitoring-alertmanager-view role to user")
admErr := oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-role-to-user", "--role-namespace=openshift-monitoring", "-n", "openshift-monitoring", "monitoring-alertmanager-view", oc.Username()).Execute()
o.Expect(admErr).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-role-from-user", "--role-namespace=openshift-monitoring", "-n", "openshift-monitoring", "monitoring-alertmanager-view", oc.Username()).Execute()
exutil.By("Get alertmanager-main route")
host, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("route", "alertmanager-main", "-ojsonpath={.spec.host}", "-n", "openshift-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Get token of current user")
token := oc.UserConfig().BearerToken
exutil.By("Check monitoring-alertmanager-view role can view receivers and alerts API")
checkMetric(oc, "https://"+host+"/api/v2/receivers", token, "Watchdog", 2*platformLoadTime)
checkMetric(oc, "https://"+host+"/api/v2/alerts?&filter={alertname=\"Watchdog\"}", token, "Watchdog", 2*platformLoadTime)
exutil.By("Check monitoring-alertmanager-view role can not silence alert")
currentTime := time.Now()
start := currentTime.UTC().Format("2006-01-02T15:04:05Z")
end := currentTime.Add(2 * time.Hour).UTC().Format("2006-01-02T15:04:05Z")
// % curl -k -H "Authorization: Bearer $token" -X POST -d '{"matchers":[{"name":"alertname","value":"Watchdog"}],"startsAt":"'"$start"'","endsAt":"'"$end"'","createdBy":"testuser","comment":"Silence Watchdog alert"}' https://$HOST/api/v2/silences
curlCmd := `curl -k -H "Authorization: Bearer ` + token + `" -X POST -d '{"matchers":[{"name":"alertname","value":"Watchdog"}],"startsAt":"` + start + `","endsAt":"` + end + `","createdBy":"testuser","comment":"Silence Watchdog alert"}' "https://` + host + `/api/v2/silences"`
out, err := exec.Command("bash", "-c", curlCmd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(string(out), "Forbidden")).Should(o.BeTrue())
})
// author: [email protected]
g.It("Author:juzhao-Medium-73288-Enable request headers flags for metrics server", func() {
exutil.By("Check metrics-server deployment exists")
err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deploy", "metrics-server", "-n", "openshift-monitoring").Execute()
if err != nil {
e2e.Logf("Unable to find metrics-server deployment.")
}
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check request headers flags for metrics server")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("deploy/metrics-server", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"metrics-server\")].args}", "-n", "openshift-monitoring").Output()
params := []string{"requestheader-client-ca-file", "requestheader-allowed-names", "requestheader-extra-headers-prefix", "requestheader-group-headers", "requestheader-username-headers"}
for _, param := range params {
o.Expect(output).To(o.ContainSubstring(param))
}
})
g.Context("user workload monitoring", func() {
var (
uwmMonitoringConfig string
)
g.BeforeEach(func() {
monitoringBaseDir = exutil.FixturePath("testdata", "monitoring")
uwmMonitoringConfig = filepath.Join(monitoringBaseDir, "uwm-monitoring-cm.yaml")
createUWMConfig(oc, uwmMonitoringConfig)
})
g.When("Need example app", func() {
var (
ns string
exampleApp string
)
g.BeforeEach(func() {
exampleApp = filepath.Join(monitoringBaseDir, "example-app.yaml")
//create project
oc.SetupProject()
ns = oc.Namespace()
//create example app and alert rule under the project
exutil.By("Create example app!")
createResourceFromYaml(oc, ns, exampleApp)
exutil.AssertAllPodsToBeReady(oc, ns)
})
// author: [email protected]
g.It("Author:hongyli-Critical-43341-Exclude namespaces from user workload monitoring based on label", func() {
var (
exampleAppRule = filepath.Join(monitoringBaseDir, "example-alert-rule.yaml")
)
exutil.By("label project not being monitored")
labelNameSpace(oc, ns, "openshift.io/user-monitoring=false")
//create example app and alert rule under the project
exutil.By("Create example alert rule!")
createResourceFromYaml(oc, ns, exampleAppRule)
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("check metrics")
checkMetric(oc, "https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=version{namespace=\""+ns+"\"}'", token, "\"result\":[]", 2*uwmLoadTime)
exutil.By("check alerts")
checkMetric(oc, "https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{namespace=\""+ns+"\"}'", token, "\"result\":[]", 2*uwmLoadTime)
exutil.By("label project being monitored")
labelNameSpace(oc, ns, "openshift.io/user-monitoring=true")
exutil.By("check metrics")
checkMetric(oc, "https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=version{namespace=\""+ns+"\"}'", token, "prometheus-example-app", 2*uwmLoadTime)
exutil.By("check alerts")
checkMetric(oc, "https://thanos-ruler.openshift-user-workload-monitoring.svc:9091/api/v1/alerts", token, "TestAlert", 2*uwmLoadTime)
})
// author: [email protected]
g.It("Author:hongyli-High-50024-High-49515-Check federate route and service of user workload Prometheus", func() {
var err error
exutil.By("Bind cluster-monitoring-view RBAC to default service account")
uwmFederateRBACViewName := "uwm-federate-rbac-" + ns
defer deleteBindMonitoringViewRoleToDefaultSA(oc, uwmFederateRBACViewName)
clusterRoleBinding, err := bindMonitoringViewRoleToDefaultSA(oc, ns, uwmFederateRBACViewName)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Created: %v %v", "ClusterRoleBinding", clusterRoleBinding.Name)
exutil.By("Get token of default service account")
token := getSAToken(oc, "default", ns)
exutil.By("check uwm federate endpoint service")
checkMetric(oc, "https://prometheus-user-workload.openshift-user-workload-monitoring.svc:9092/federate --data-urlencode 'match[]=version'", token, "prometheus-example-app", 2*uwmLoadTime)
exutil.By("check uwm federate route")
checkRoute(oc, "openshift-user-workload-monitoring", "federate", token, "match[]=version", "prometheus-example-app", 100)
})
// author: [email protected]
g.It("Author:tagao-Medium-50241-Prometheus (uwm) externalLabels not showing always in alerts", func() {
var (
exampleAppRule = filepath.Join(monitoringBaseDir, "in-cluster_query_alert_rule.yaml")
)
exutil.By("Create alert rule with expression about data provided by in-cluster prometheus")
createResourceFromYaml(oc, ns, exampleAppRule)
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("Check labelmy is in the alert")
checkMetric(oc, "https://alertmanager-main.openshift-monitoring.svc:9094/api/v2/alerts", token, "labelmy", 2*uwmLoadTime)
})
// author: [email protected]
g.It("Author:tagao-Medium-42825-Expose EnforcedTargetLimit in the CMO configuration for UWM", func() {
exutil.By("check user metrics")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, "https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=version{namespace=\""+ns+"\"}'", token, "prometheus-example-app", 2*uwmLoadTime)
exutil.By("scale deployment replicas to 2")
oc.WithoutNamespace().Run("scale").Args("deployment", "prometheus-example-app", "--replicas=2", "-n", ns).Execute()
exutil.By("check user metrics again, the user metrics can't be found from thanos-querier")
checkMetric(oc, "https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=version{namespace=\""+ns+"\"}'", token, "\"result\":[]", 2*uwmLoadTime)
})
// author: [email protected]
g.It("Author:tagao-Medium-49189-Enforce label scrape limits for UWM [Serial]", func() {
var (
invalidUWM = filepath.Join(monitoringBaseDir, "invalid-uwm.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("query metrics from thanos-querier")
checkMetric(oc, "https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=version'", token, "prometheus-example-app", uwmLoadTime)
exutil.By("trigger label_limit exceed")
createResourceFromYaml(oc, "openshift-user-workload-monitoring", invalidUWM)
exutil.By("check in thanos-querier /targets api, it should complains the label_limit exceeded")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/targets`, token, `label_limit exceeded`, 2*uwmLoadTime)
exutil.By("trigger label_name_length_limit exceed")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("cm", "user-workload-monitoring-config", "-p", `{"data": {"config.yaml": "prometheus:\n enforcedLabelLimit: 8\n enforcedLabelNameLengthLimit: 1\n enforcedLabelValueLengthLimit: 1\n"}}`, "--type=merge", "-n", "openshift-user-workload-monitoring").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check in thanos-querier /targets api, it should complains the label_name_length_limit exceeded")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/targets`, token, `label_name_length_limit exceeded`, 2*uwmLoadTime)
exutil.By("trigger label_value_length_limit exceed")
err2 := oc.AsAdmin().WithoutNamespace().Run("patch").Args("cm", "user-workload-monitoring-config", "-p", `{"data": {"config.yaml": "prometheus:\n enforcedLabelLimit: 8\n enforcedLabelNameLengthLimit: 8\n enforcedLabelValueLengthLimit: 1\n"}}`, "--type=merge", "-n", "openshift-user-workload-monitoring").Execute()
o.Expect(err2).NotTo(o.HaveOccurred())
exutil.By("check in thanos-querier /targets api, it should complains the label_value_length_limit exceeded")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/targets`, token, `label_value_length_limit exceeded`, 2*uwmLoadTime)
exutil.By("relax restrictions")
err3 := oc.AsAdmin().WithoutNamespace().Run("patch").Args("cm", "user-workload-monitoring-config", "-p", `{"data": {"config.yaml": "prometheus:\n enforcedLabelLimit: 10\n enforcedLabelNameLengthLimit: 10\n enforcedLabelValueLengthLimit: 50\n"}}`, "--type=merge", "-n", "openshift-user-workload-monitoring").Execute()
o.Expect(err3).NotTo(o.HaveOccurred())
exutil.By("able to see the metrics")
checkMetric(oc, "https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=version'", token, "prometheus-example-app", 2*uwmLoadTime)
})
// author: [email protected]
g.It("Author:tagao-Medium-44805-Expose tenancy-aware labels and values of api v1 label endpoints for Thanos query", func() {
var (
rolebinding = filepath.Join(monitoringBaseDir, "rolebinding.yaml")
)
exutil.By("add RoleBinding to specific user")
createResourceFromYaml(oc, ns, rolebinding)
//oc -n ns1 patch RoleBinding view -p '{"subjects":[{"apiGroup":"rbac.authorization.k8s.io","kind":"User","name":"${user}"}]}'
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("RoleBinding", "view", "-p", `{"subjects":[{"apiGroup":"rbac.authorization.k8s.io","kind":"User","name":"`+oc.Username()+`"}]}`, "--type=merge", "-n", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("get user API token")
token := oc.UserConfig().BearerToken
exutil.By("check namespace labels") //There are many labels, only check the few ones
checkMetric(oc, "\"https://thanos-querier.openshift-monitoring.svc:9092/api/v1/labels?namespace="+oc.Namespace()+"\"", token, `"__name__"`, 2*uwmLoadTime)
checkMetric(oc, "\"https://thanos-querier.openshift-monitoring.svc:9092/api/v1/labels?namespace="+oc.Namespace()+"\"", token, `"version"`, 2*uwmLoadTime)
checkMetric(oc, "\"https://thanos-querier.openshift-monitoring.svc:9092/api/v1/labels?namespace="+oc.Namespace()+"\"", token, `"cluster_ip"`, 2*uwmLoadTime)
exutil.By("show label value")
checkMetric(oc, "\"https://thanos-querier.openshift-monitoring.svc:9092/api/v1/label/version/values?namespace="+oc.Namespace()+"\"", token, `"v0.4.1"`, 2*uwmLoadTime)
exutil.By("check with a specific series")
checkMetric(oc, "\"https://thanos-querier.openshift-monitoring.svc:9092/api/v1/series?match[]=version&namespace="+oc.Namespace()+"\"", token, `"service":"prometheus-example-app"`, 2*uwmLoadTime)
})
//author: [email protected]
g.It("Author:tagao-High-73151-Update Prometheus user-workload to enable additional scrape metrics [Serial]", func() {
var (
exampleApp2 = filepath.Join(monitoringBaseDir, "example-app-2-sampleLimit.yaml")
approachingEnforcedSamplesLimit = filepath.Join(monitoringBaseDir, "approachingEnforcedSamplesLimit.yaml")
)
exutil.By("restore monitoring config")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("PrometheusRule", "monitoring-stack-alerts", "-n", ns).Execute()
exutil.By("create example-app2")
// example-app2 has sampleLimit set and should be created under the same ns as example-app
createResourceFromYaml(oc, ns, exampleApp2)
exutil.By("wait for pod ready")
exutil.AssertPodToBeReady(oc, "prometheus-user-workload-0", "openshift-user-workload-monitoring")
exutil.By("check extra-scrape-metrics added to uwm prometheus")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("prometheus", "user-workload", "-ojsonpath={.spec.enableFeatures}", "-n", "openshift-user-workload-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("extra-scrape-metrics"))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("set up the alert rules")
createResourceFromYaml(oc, ns, approachingEnforcedSamplesLimit)
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("check metrics")
exampleAppPods, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns).Output()
e2e.Logf("pods condition under ns:\n%s", exampleAppPods)
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=(scrape_sample_limit == 1)'`, token, "prometheus-example-app-2", uwmLoadTime)
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertname="ApproachingEnforcedSamplesLimit"}'`, token, `"prometheus-example-app-2"`, uwmLoadTime)
})
})
// author: [email protected]
g.It("Author:hongyli-High-49745-High-50519-Retention for UWM Prometheus and thanos ruler", func() {
exutil.By("Check retention size of prometheus user workload")
checkRetention(oc, "openshift-user-workload-monitoring", "prometheus-user-workload", "storage.tsdb.retention.size=5GiB", uwmLoadTime)
exutil.By("Check retention of prometheus user workload")
checkRetention(oc, "openshift-user-workload-monitoring", "prometheus-user-workload", "storage.tsdb.retention.time=15d", 20)
exutil.By("Check retention of thanos ruler")
checkRetention(oc, "openshift-user-workload-monitoring", "thanos-ruler-user-workload", "retention=15d", uwmLoadTime)
})
// author: [email protected]
g.It("Author:juzhao-LEVEL0-Medium-42956-Should not have PrometheusNotIngestingSamples alert if enabled user workload monitoring only", func() {
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("check alerts, Should not have PrometheusNotIngestingSamples alert fired")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertname="PrometheusNotIngestingSamples"}'`, token, `"result":[]`, uwmLoadTime)
})
// author: [email protected]
g.It("Author:juzhao-Medium-70998-PrometheusRestrictedConfig supports enabling sendExemplars", func() {
exutil.By("check exemplar-storage is enabled")
cmd := "-ojsonpath={.spec.enableFeatures[*]}"
checkYamlconfig(oc, "openshift-user-workload-monitoring", "prometheus", "user-workload", cmd, "exemplar-storage", true)
//check settings in UWM prometheus pods
podNames, err := exutil.GetAllPodsWithLabel(oc, "openshift-user-workload-monitoring", "app.kubernetes.io/name=prometheus")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range podNames {
cmd = "-ojsonpath={.spec.containers[?(@.name==\"prometheus\")].args}"
checkYamlconfig(oc, "openshift-user-workload-monitoring", "pod", pod, cmd, `--enable-feature=`, true)
checkYamlconfig(oc, "openshift-user-workload-monitoring", "pod", pod, cmd, `exemplar-storage`, true)
}
exutil.By("check sendExemplars is true in UWM prometheus CRD")
cmd = "-ojsonpath={.spec.remoteWrite}"
checkYamlconfig(oc, "openshift-user-workload-monitoring", "prometheus", "user-workload", cmd, `"sendExemplars":true`, true)
})
// author: [email protected]
g.It("Author:tagao-LEVEL0-Medium-46301-Allow OpenShift users to configure query log file for Prometheus", func() {
exutil.By("confirm prometheus-k8s-0 pod is ready for check")
MONpod, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-monitoring").Output()
e2e.Logf("the MON pods condition: %s", MONpod)
assertPodToBeReady(oc, "prometheus-k8s-0", "openshift-monitoring")
ensurePodRemainsReady(oc, "prometheus-k8s-0", "openshift-monitoring", 30*time.Second, 5*time.Second)
cmd := "ls /tmp/promethues_query.log"
checkConfigInsidePod(oc, "openshift-monitoring", "prometheus", "prometheus-k8s-0", cmd, "promethues_query.log", true)
exutil.By("check query log file for prometheus in openshift-monitoring")
queryErr := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", "openshift-monitoring", "-c", "prometheus", "prometheus-k8s-0", "--", "curl", "http://localhost:9090/api/v1/query?query=prometheus_build_info").Execute()
o.Expect(queryErr).NotTo(o.HaveOccurred())
cmd = "cat /tmp/promethues_query.log | grep prometheus_build_info"
checkConfigInsidePod(oc, "openshift-monitoring", "prometheus", "prometheus-k8s-0", cmd, "prometheus_build_info", true)
exutil.By("confirm prometheus-user-workload-0 pod is ready for check")
UWMpod, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-user-workload-monitoring").Output()
e2e.Logf("the UWM pods condition: %s", UWMpod)
assertPodToBeReady(oc, "prometheus-user-workload-0", "openshift-user-workload-monitoring")
ensurePodRemainsReady(oc, "prometheus-user-workload-0", "openshift-user-workload-monitoring", 60*time.Second, 5*time.Second)
cmd = "ls /tmp/uwm_query.log"
checkConfigInsidePod(oc, "openshift-user-workload-monitoring", "prometheus", "prometheus-user-workload-0", cmd, "uwm_query.log", true)
exutil.By("check query log file for prometheus in openshift-user-workload-monitoring")
queryErr = oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", "openshift-user-workload-monitoring", "-c", "prometheus", "prometheus-user-workload-0", "--", "curl", "http://localhost:9090/api/v1/query?query=up").Execute()
o.Expect(queryErr).NotTo(o.HaveOccurred())
cmd = "cat /tmp/uwm_query.log | grep up"
checkConfigInsidePod(oc, "openshift-user-workload-monitoring", "prometheus", "prometheus-user-workload-0", cmd, "up", true)
})
// author: [email protected]
g.It("Author:tagao-Medium-50008-Expose sigv4 settings for remote write in the CMO configuration [Serial]", func() {
var (
sigv4ClusterCM = filepath.Join(monitoringBaseDir, "sigv4-cluster-monitoring-cm.yaml")
sigv4UwmCM = filepath.Join(monitoringBaseDir, "sigv4-uwm-monitoring-cm.yaml")
sigv4Secret = filepath.Join(monitoringBaseDir, "sigv4-secret.yaml")
sigv4SecretUWM = filepath.Join(monitoringBaseDir, "sigv4-secret-uwm.yaml")
)
exutil.By("delete secret/cm at the end of case")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("secret", "sigv4-credentials-uwm", "-n", "openshift-user-workload-monitoring").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("secret", "sigv4-credentials", "-n", "openshift-monitoring").Execute()
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("Create sigv4 secret under openshift-monitoring")
createResourceFromYaml(oc, "openshift-monitoring", sigv4Secret)
exutil.By("Configure remote write sigv4 and enable user workload monitoring")
createResourceFromYaml(oc, "openshift-monitoring", sigv4ClusterCM)
exutil.By("confirm prometheus-k8s-0 pod is ready for check")
pod, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-monitoring", "-l", "app.kubernetes.io/name=prometheus").Output()
e2e.Logf("the prometheus pods condition: %s", pod)
exutil.AssertPodToBeReady(oc, "prometheus-k8s-0", "openshift-monitoring")
exutil.By("Check sig4 config under openshift-monitoring")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "url: https://authorization.remotewrite.com/api/write")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "sigv4:")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "region: us-central1")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "access_key: basic_user")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "secret_key: basic_pass")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "profile: SomeProfile")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "role_arn: SomeRoleArn")
exutil.By("Create sigv4 secret under openshift-user-workload-monitoring")
createResourceFromYaml(oc, "openshift-user-workload-monitoring", sigv4SecretUWM)
exutil.By("Configure remote write sigv4 setting for user workload monitoring")
createResourceFromYaml(oc, "openshift-user-workload-monitoring", sigv4UwmCM)
exutil.By("confirm prometheus-user-workload-0 pod is ready for check")
pod, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-user-workload-monitoring", "-l", "app.kubernetes.io/name=prometheus").Output()
e2e.Logf("the prometheus pods condition: %s", pod)
exutil.AssertPodToBeReady(oc, "prometheus-user-workload-0", "openshift-user-workload-monitoring")
exutil.By("Check sig4 config under openshift-user-workload-monitoring")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "url: https://authorization.remotewrite.com/api/write")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "sigv4:")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "region: us-east2")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "access_key: basic_user_uwm")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "secret_key: basic_pass_uwm")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "profile: umw_Profile")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "role_arn: umw_RoleArn")
})
// author: [email protected]
g.It("Author:tagao-Medium-49694-Expose OAuth2 settings for remote write in the CMO configuration [Serial]", func() {
var (
oauth2ClusterCM = filepath.Join(monitoringBaseDir, "oauth2-cluster-monitoring-cm.yaml")
oauth2UwmCM = filepath.Join(monitoringBaseDir, "oauth2-uwm-monitoring-cm.yaml")
oauth2Secret = filepath.Join(monitoringBaseDir, "oauth2-secret.yaml")
oauth2SecretUWM = filepath.Join(monitoringBaseDir, "oauth2-secret-uwm.yaml")
)
exutil.By("delete secret/cm at the end of case")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("secret", "oauth2-credentials", "-n", "openshift-user-workload-monitoring").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("secret", "oauth2-credentials", "-n", "openshift-monitoring").Execute()
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("Create oauth2 secret under openshift-monitoring")
createResourceFromYaml(oc, "openshift-monitoring", oauth2Secret)
exutil.By("Configure remote write oauth2 and enable user workload monitoring")
createResourceFromYaml(oc, "openshift-monitoring", oauth2ClusterCM)
exutil.By("Check oauth2 config under openshift-monitoring")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "url: https://test.remotewrite.com/api/write")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "remote_timeout: 30s")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "client_id: basic_user")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "client_secret: basic_pass")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "token_url: https://example.com/oauth2/token")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "scope1")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "scope2")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "param1: value1")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "param2: value2")
exutil.By("Create oauth2 secret under openshift-user-workload-monitoring")
createResourceFromYaml(oc, "openshift-user-workload-monitoring", oauth2SecretUWM)
exutil.By("Configure remote write oauth2 setting for user workload monitoring")
createResourceFromYaml(oc, "openshift-user-workload-monitoring", oauth2UwmCM)
exutil.By("Check oauth2 config under openshift-user-workload-monitoring")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "url: https://test.remotewrite.com/api/write")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "remote_timeout: 30s")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "client_id: basic_user")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "client_secret: basic_pass")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "token_url: https://example.com/oauth2/token")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "scope3")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "scope4")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "param3: value3")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "param4: value4")
})
//author: [email protected]
g.It("Author:tagao-Medium-47519-Platform prometheus operator should reconcile AlertmanagerConfig resources from user namespaces [Serial]", func() {
var (
enableAltmgrConfig = filepath.Join(monitoringBaseDir, "enableUserAlertmanagerConfig.yaml")
wechatConfig = filepath.Join(monitoringBaseDir, "exampleAlertConfigAndSecret.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("enable alert manager config")
createResourceFromYaml(oc, "openshift-monitoring", enableAltmgrConfig)
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
exutil.By("check the initial alertmanager configuration")
checkAlertmanagerConfig(oc, "openshift-monitoring", "alertmanager-main-0", "alertname = Watchdog", true)
exutil.By("create&check alertmanagerconfig under openshift-monitoring")
createResourceFromYaml(oc, "openshift-monitoring", wechatConfig)
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("alertmanagerconfig/config-example", "secret/wechat-config", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("config-example"))
o.Expect(output).To(o.ContainSubstring("wechat-config"))
exutil.By("check if the new created AlertmanagerConfig is reconciled in the Alertmanager configuration (should not)")
checkAlertmanagerConfig(oc, "openshift-monitoring", "alertmanager-main-0", "wechat", false)
exutil.By("delete the alertmanagerconfig/secret created under openshift-monitoring")
oc.AsAdmin().WithoutNamespace().Run("delete").Args("alertmanagerconfig/config-example", "secret/wechat-config", "-n", "openshift-monitoring").Execute()
exutil.By("create one new project, label the namespace and create the same AlertmanagerConfig")
oc.SetupProject()
ns := oc.Namespace()
oc.AsAdmin().WithoutNamespace().Run("label").Args("namespace", ns, "openshift.io/user-monitoring=false").Execute()
exutil.By("create&check alertmanagerconfig under the namespace")
createResourceFromYaml(oc, ns, wechatConfig)
output2, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("alertmanagerconfig/config-example", "secret/wechat-config", "-n", ns).Output()
o.Expect(output2).To(o.ContainSubstring("config-example"))
o.Expect(output2).To(o.ContainSubstring("wechat-config"))
exutil.By("check if the new created AlertmanagerConfig is reconciled in the Alertmanager configuration (should not)")
checkAlertmanagerConfig(oc, "openshift-monitoring", "alertmanager-main-0", "wechat", false)
exutil.By("update the label to true")
oc.AsAdmin().WithoutNamespace().Run("label").Args("namespace", ns, "openshift.io/user-monitoring=true", "--overwrite").Execute()
exutil.By("check if the new created AlertmanagerConfig is reconciled in the Alertmanager configuration")
checkAlertmanagerConfig(oc, "openshift-monitoring", "alertmanager-main-0", "wechat", true)
exutil.By("set enableUserAlertmanagerConfig to false")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("cm", "cluster-monitoring-config", "-p", `{"data": {"config.yaml": "alertmanagerMain:\n enableUserAlertmanagerConfig: false\n"}}`, "--type=merge", "-n", "openshift-monitoring").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("the AlertmanagerConfig from user project is removed")
checkAlertmanagerConfig(oc, "openshift-monitoring", "alertmanager-main-0", "wechat", false)
})
g.It("Author:tagao-Medium-49404-Medium-49176-Expose Authorization settings for remote write in the CMO configuration, Add the relabel config to all user-supplied remote_write configurations [Serial]", func() {
var (
authClusterCM = filepath.Join(monitoringBaseDir, "auth-cluster-monitoring-cm.yaml")
authUwmCM = filepath.Join(monitoringBaseDir, "auth-uwm-monitoring-cm.yaml")
authSecret = filepath.Join(monitoringBaseDir, "auth-secret.yaml")
authSecretUWM = filepath.Join(monitoringBaseDir, "auth-secret-uwm.yaml")
)
exutil.By("delete secret/cm at the end of case")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("secret", "rw-auth", "-n", "openshift-user-workload-monitoring").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("secret", "rw-auth", "-n", "openshift-monitoring").Execute()
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("Create auth secret under openshift-monitoring")
createResourceFromYaml(oc, "openshift-monitoring", authSecret)
exutil.By("Configure remote write auth and enable user workload monitoring")
createResourceFromYaml(oc, "openshift-monitoring", authClusterCM)
exutil.By("confirm prometheus-k8s-0 pod is ready for check")
pod, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-monitoring", "-l", "app.kubernetes.io/name=prometheus").Output()
e2e.Logf("the prometheus pods condition: %s", pod)
exutil.AssertPodToBeReady(oc, "prometheus-k8s-0", "openshift-monitoring")
exutil.By("Check auth config under openshift-monitoring")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "url: https://remote-write.endpoint")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "target_label: __tmp_openshift_cluster_id__")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "url: https://basicAuth.remotewrite.com/api/write")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "username: basic_user")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "password: basic_pass")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "url: https://authorization.remotewrite.com/api/write")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "__tmp_openshift_cluster_id__")
checkRmtWrtConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "target_label: cluster_id")
exutil.By("Create auth secret under openshift-user-workload-monitoring")
createResourceFromYaml(oc, "openshift-user-workload-monitoring", authSecretUWM)
exutil.By("Configure remote write auth setting for user workload monitoring")
createResourceFromYaml(oc, "openshift-user-workload-monitoring", authUwmCM)
exutil.By("confirm prometheus-user-workload-0 pod is ready for check")
pod, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-user-workload-monitoring", "-l", "app.kubernetes.io/name=prometheus").Output()
e2e.Logf("the prometheus pods condition: %s", pod)
exutil.AssertPodToBeReady(oc, "prometheus-user-workload-0", "openshift-user-workload-monitoring")
exutil.By("Check auth config under openshift-user-workload-monitoring")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "url: https://remote-write.endpoint")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "target_label: __tmp_openshift_cluster_id__")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "url: https://basicAuth.remotewrite.com/api/write")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "username: basic_user")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "password: basic_pass")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "url: https://bearerTokenFile.remotewrite.com/api/write")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "url: https://authorization.remotewrite.com/api/write")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "__tmp_openshift_cluster_id__")
checkRmtWrtConfig(oc, "openshift-user-workload-monitoring", "prometheus-user-workload-0", "target_label: cluster_id_1")
})
// author: [email protected]
g.It("Author:tagao-Low-43037-Should not have error for oc adm inspect clusteroperator monitoring command", func() {
exutil.By("delete must-gather file at the end of case")
defer exec.Command("bash", "-c", "rm -rf /tmp/must-gather-43037").Output()
exutil.By("oc adm inspect clusteroperator monitoring")
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
output, _ := oc.AsAdmin().WithoutNamespace().Run("adm").Args("inspect", "clusteroperator", "monitoring", "--dest-dir=/tmp/must-gather-43037").Output()
o.Expect(output).NotTo(o.ContainSubstring("error"))
})
// author: [email protected]
g.It("Author:tagao-Medium-32224-Separate user workload configuration [Serial]", func() {
var (
separateUwmConf = filepath.Join(monitoringBaseDir, "separate-uwm-config.yaml")
)
exutil.By("delete uwm-config/cm-config and bound pvc at the end of a serial case")
defer func() {
PvcNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pvc", "-ojsonpath={.items[*].metadata.name}", "-l", "app.kubernetes.io/instance=user-workload", "-n", "openshift-user-workload-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred())
for _, pvc := range strings.Fields(PvcNames) {
oc.AsAdmin().WithoutNamespace().Run("delete").Args("pvc", pvc, "-n", "openshift-user-workload-monitoring").Execute()
}
}()
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("this case should execute on cluster which have storage class")
checkSc, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("sc").Output()
if checkSc == "{}" || !strings.Contains(checkSc, "default") {
g.Skip("This case should execute on cluster which have default storage class!")
}
exutil.By("get master node names with label")
NodeNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "-l", "node-role.kubernetes.io/master", "-ojsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeNameList := strings.Fields(NodeNames)
exutil.By("add labels to master nodes, and delete them at the end of case")
for _, name := range nodeNameList {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", name, "uwm-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", name, "uwm=deploy").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("create the separate user workload configuration")
createResourceFromYaml(oc, "openshift-user-workload-monitoring", separateUwmConf)
exutil.By("check remoteWrite metrics")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=prometheus_remote_storage_shards'`, token, `"url":"http://localhost:1234/receive"`, 3*uwmLoadTime)
exutil.By("check prometheus-user-workload pods are bound to PVCs, check cpu and memory")
PodNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-ojsonpath={.items[*].metadata.name}", "-l", "app.kubernetes.io/name=prometheus", "-n", "openshift-user-workload-monitoring").Output()
PodNameList := strings.Fields(PodNames)
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range PodNameList {
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", pod, "-ojsonpath={.spec.volumes[]}", "-n", "openshift-user-workload-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("uwm-prometheus"))
output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", pod, `-ojsonpath={.spec.containers[?(@.name=="prometheus")].resources.requests}`, "-n", "openshift-user-workload-monitoring").Output()
o.Expect(output).To(o.ContainSubstring(`"cpu":"200m","memory":"1Gi"`))
}
exutil.By("check thanos-ruler-user-workload pods are bound to PVCs, check cpu and memory")
PodNames, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-ojsonpath={.items[*].metadata.name}", "-l", "app.kubernetes.io/name=thanos-ruler", "-n", "openshift-user-workload-monitoring").Output()
PodNameList = strings.Fields(PodNames)
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range PodNameList {
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", pod, "-ojsonpath={.spec.volumes[]}", "-n", "openshift-user-workload-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("thanosruler"))
output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", pod, `-ojsonpath={.spec.containers[?(@.name=="thanos-ruler")].resources.requests}`, "-n", "openshift-user-workload-monitoring").Output()
o.Expect(output).To(o.ContainSubstring(`"cpu":"20m","memory":"50Mi"`))
}
exutil.By("toleration settings check")
PodNames, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-ojsonpath={.items[*].metadata.name}", "-n", "openshift-user-workload-monitoring").Output()
PodNameList = strings.Fields(PodNames)
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range PodNameList {
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", pod, "-ojsonpath={.spec.tolerations}", "-n", "openshift-user-workload-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("node-role.kubernetes.io/master"))
o.Expect(output).To(o.ContainSubstring(`"operator":"Exists"`))
}
exutil.By("prometheus.enforcedSampleLimit check")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("prometheus", "user-workload", "-ojsonpath={.spec.enforcedSampleLimit}", "-n", "openshift-user-workload-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("2"))
exutil.By("prometheus.retention check")
output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("prometheus", "user-workload", "-ojsonpath={.spec.retention}", "-n", "openshift-user-workload-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("48h"))
})
// author: [email protected]
g.It("Author:tagao-LEVEL0-Medium-50954-Allow the deployment of a dedicated UWM Alertmanager [Serial]", func() {
var (
dedicatedUWMalertmanager = filepath.Join(monitoringBaseDir, "dedicated-uwm-alertmanager.yaml")
exampleAlert = filepath.Join(monitoringBaseDir, "example-alert-rule.yaml")
AlertmanagerConfig = filepath.Join(monitoringBaseDir, "exampleAlertConfigAndSecret.yaml")
)
exutil.By("delete uwm-config/cm-config and bound pvc at the end of a serial case")
defer func() {
PvcNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pvc", "-ojsonpath={.items[*].metadata.name}", "-l", "alertmanager=user-workload", "-n", "openshift-user-workload-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred())
for _, pvc := range strings.Fields(PvcNames) {
oc.AsAdmin().WithoutNamespace().Run("delete").Args("pvc", pvc, "-n", "openshift-user-workload-monitoring").Execute()
}
}()
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("this case should execute on cluster which have storage class")
checkSc, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("sc").Output()
if checkSc == "{}" || !strings.Contains(checkSc, "default") {
g.Skip("This case should execute on cluster which have default storage class!")
}
exutil.By("get master node names with label")
NodeNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "-l", "node-role.kubernetes.io/master", "-ojsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeNameList := strings.Fields(NodeNames)
exutil.By("add labels to master nodes, and delete them at the end of case")
for _, name := range nodeNameList {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", name, "uwm-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", name, "uwm=alertmanager").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("create the dedicated UWM Alertmanager configuration")
createResourceFromYaml(oc, "openshift-user-workload-monitoring", dedicatedUWMalertmanager)
exutil.By("deploy prometheusrule and alertmanagerconfig to user project")
oc.SetupProject()
ns := oc.Namespace()
createResourceFromYaml(oc, ns, exampleAlert)
createResourceFromYaml(oc, ns, AlertmanagerConfig)
exutil.By("check all pods are created")
exutil.AssertAllPodsToBeReady(oc, "openshift-user-workload-monitoring")
exutil.By("confirm thanos-ruler is ready")
exutil.AssertPodToBeReady(oc, "thanos-ruler-user-workload-0", "openshift-user-workload-monitoring")
thanosPod, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-l", "app.kubernetes.io/name=thanos-ruler", "-n", "openshift-user-workload-monitoring").Output()
e2e.Logf("thanos-ruler pods: \n%v", thanosPod)
thanosSaErr := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 60*time.Second, true, func(context.Context) (bool, error) {
thanosSa, err := oc.AsAdmin().Run("get").Args("sa", "thanos-ruler", "-n", "openshift-user-workload-monitoring").Output()
if err != nil || strings.Contains(thanosSa, "not found") {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(thanosSaErr, "sa not created")
exutil.By("check the alerts could be found in alertmanager under openshift-user-workload-monitoring project")
token := getSAToken(oc, "thanos-ruler", "openshift-user-workload-monitoring")
checkMetric(oc, `https://alertmanager-user-workload.openshift-user-workload-monitoring.svc:9095/api/v2/alerts`, token, "TestAlert1", 3*uwmLoadTime)
exutil.By("check the alerts could not be found in openshift-monitoring project")
//same as: checkMetric(oc, `https://alertmanager-main.openshift-monitoring.svc:9094/api/v2/alerts?&filter={alertname="TestAlert1"}`, token, "[]", 3*uwmLoadTime)
checkAlertNotExist(oc, "https://alertmanager-main.openshift-monitoring.svc:9094/api/v2/alerts", token, "TestAlert1", 3*uwmLoadTime)
exutil.By("get alertmanager pod names")
PodNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-ojsonpath={.items[*].metadata.name}", "-l", "app.kubernetes.io/name=alertmanager", "-n", "openshift-user-workload-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check alertmanager pod resources limits and requests")
for _, pod := range strings.Fields(PodNames) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", pod, `-ojsonpath={.spec.containers[?(@.name=="alertmanager")].resources.limits}`, "-n", "openshift-user-workload-monitoring").Output()
o.Expect(output).To(o.ContainSubstring(`"cpu":"100m","memory":"250Mi"`))
o.Expect(err).NotTo(o.HaveOccurred())
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", pod, `-ojsonpath={.spec.containers[?(@.name=="alertmanager")].resources.requests}`, "-n", "openshift-user-workload-monitoring").Output()
o.Expect(output).To(o.ContainSubstring(`"cpu":"40m","memory":"200Mi"`))
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("check alertmanager pod are bound pvcs")
for _, pod := range strings.Fields(PodNames) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", pod, "-ojsonpath={.spec.volumes[]}", "-n", "openshift-user-workload-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("uwm-alertmanager"))
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("check AlertmanagerConfigs are take effect")
for _, pod := range strings.Fields(PodNames) {
checkAlertmanagerConfig(oc, "openshift-user-workload-monitoring", pod, "api_url: http://wechatserver:8080/", true)
}
exutil.By("check logLevel is correctly set")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("alertmanager/user-workload", "-ojsonpath={.spec.logLevel}", "-n", "openshift-user-workload-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("debug"))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check logLevel is take effect")
for _, pod := range strings.Fields(PodNames) {
output, err = oc.AsAdmin().WithoutNamespace().Run("logs").Args("-c", "alertmanager", pod, "-n", "openshift-user-workload-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if !strings.Contains(output, "level=debug") {
e2e.Failf("logLevel is wrong or not take effect")
}
}
exutil.By("disable alertmanager in user-workload-monitoring-config")
//oc patch cm user-workload-monitoring-config -p '{"data": {"config.yaml": "alertmanager:\n enabled: false\n"}}' --type=merge -n openshift-user-workload-monitoring
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("cm", "user-workload-monitoring-config", "-p", `{"data": {"config.yaml": "alertmanager:\n enabled: false\n"}}`, "--type=merge", "-n", "openshift-user-workload-monitoring").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("should found user project alerts in platform alertmanager")
checkMetric(oc, `https://alertmanager-main.openshift-monitoring.svc:9094/api/v2/alerts`, token, "TestAlert1", 3*uwmLoadTime)
exutil.By("UWM alertmanager pod should disappear") //need time to wait pod fully terminated, put this step after the checkMetric
checkPodDeleted(oc, "openshift-user-workload-monitoring", "app.kubernetes.io/name=alertmanager", "alertmanager")
})
// author: [email protected]
g.It("ConnectedOnly-Author:tagao-Medium-43286-Allow sending alerts to external Alertmanager for user workload monitoring components - enabled in-cluster alertmanager", func() {
var (
testAlertmanager = filepath.Join(monitoringBaseDir, "example-alertmanager.yaml")
exampleAlert = filepath.Join(monitoringBaseDir, "example-alert-rule.yaml")
exampleAlert2 = filepath.Join(monitoringBaseDir, "leaf-prometheus-rule.yaml")
)
exutil.By("create alertmanager and set external alertmanager for prometheus/thanosRuler under openshift-user-workload-monitoring")
createResourceFromYaml(oc, "openshift-user-workload-monitoring", testAlertmanager)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("alertmanager", "test-alertmanager", "-n", "openshift-user-workload-monitoring").Execute()
exutil.By("check alertmanager pod is created")
alertmanagerTestPodCheck(oc)
exutil.By("skip case on disconnected cluster")
output, err := oc.AsAdmin().Run("get").Args("pod", "alertmanager-test-alertmanager-0", "-n", "openshift-user-workload-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("the pod condition: %s", output)
if output != "{}" && strings.Contains(output, "ImagePullBackOff") {
g.Skip("This case can not execute on a disconnected cluster!")
}
exutil.By("create example PrometheusRule under user namespace")
oc.SetupProject()
ns1 := oc.Namespace()
createResourceFromYaml(oc, ns1, exampleAlert)
exutil.By("create another user namespace then create PrometheusRule with leaf-prometheus label")
oc.SetupProject()
ns2 := oc.Namespace()
createResourceFromYaml(oc, ns2, exampleAlert2)
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("check the user alerts TestAlert1 and TestAlert2 are shown in \"in-cluster alertmanager\" API")
checkMetric(oc, `https://alertmanager-main.openshift-monitoring.svc:9094/api/v2/alerts?filter={alertname="TestAlert1"}`, token, "TestAlert1", uwmLoadTime)
checkMetric(oc, `https://alertmanager-main.openshift-monitoring.svc:9094/api/v2/alerts?filter={alertname="TestAlert1"}`, token, `"generatorURL":"https://console-openshift-console.`, uwmLoadTime)
checkMetric(oc, `https://alertmanager-main.openshift-monitoring.svc:9094/api/v2/alerts?filter={alertname="TestAlert2"}`, token, "TestAlert2", uwmLoadTime)
checkMetric(oc, `https://alertmanager-main.openshift-monitoring.svc:9094/api/v2/alerts?filter={alertname="TestAlert2"}`, token, `"generatorURL":"https://console-openshift-console.`, uwmLoadTime)
exutil.By("check the alerts are also sent to external alertmanager")
queryFromPod(oc, `http://alertmanager-operated.openshift-user-workload-monitoring.svc:9093/api/v2/alerts?filter={alertname="TestAlert1"}`, token, "openshift-user-workload-monitoring", "thanos-ruler-user-workload-0", "thanos-ruler", "TestAlert1", uwmLoadTime)
queryFromPod(oc, `http://alertmanager-operated.openshift-user-workload-monitoring.svc:9093/api/v2/alerts?filter={alertname="TestAlert1"}`, token, "openshift-user-workload-monitoring", "thanos-ruler-user-workload-0", "thanos-ruler", `"generatorURL":"https://console-openshift-console.`, uwmLoadTime)
queryFromPod(oc, `http://alertmanager-operated.openshift-user-workload-monitoring.svc:9093/api/v2/alerts?filter={alertname="TestAlert2"}`, token, "openshift-user-workload-monitoring", "thanos-ruler-user-workload-0", "thanos-ruler", "TestAlert2", uwmLoadTime)
queryFromPod(oc, `http://alertmanager-operated.openshift-user-workload-monitoring.svc:9093/api/v2/alerts?filter={alertname="TestAlert2"}`, token, "openshift-user-workload-monitoring", "thanos-ruler-user-workload-0", "thanos-ruler", `"generatorURL":"https://console-openshift-console.`, uwmLoadTime)
})
// author: [email protected]
g.It("Author:tagao-ConnectedOnly-Medium-43311-Allow sending alerts to external Alertmanager for user workload monitoring components - disabled in-cluster alertmanager [Serial]", func() {
var (
InClusterMonitoringCM = filepath.Join(monitoringBaseDir, "disLocalAlert-setExternalAlert-prometheus.yaml")
testAlertmanager = filepath.Join(monitoringBaseDir, "example-alertmanager.yaml")
exampleAlert = filepath.Join(monitoringBaseDir, "example-alert-rule.yaml")
)
exutil.By("Restore cluster monitoring stack default configuration")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("alertmanager", "test-alertmanager", "-n", "openshift-user-workload-monitoring", "--ignore-not-found").Execute()
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("disable local alertmanager and set external manager for prometheus")
createResourceFromYaml(oc, "openshift-monitoring", InClusterMonitoringCM)
exutil.By("create alertmanager and set external alertmanager for prometheus/thanosRuler under openshift-user-workload-monitoring")
createResourceFromYaml(oc, "openshift-user-workload-monitoring", testAlertmanager)
exutil.By("check alertmanager pod is created")
alertmanagerTestPodCheck(oc)
exutil.By("skip case on disconnected cluster")
cmCheck, _ := oc.AsAdmin().Run("get").Args("cm", "cluster-monitoring-config", "-n", "openshift-monitoring", "-ojson").Output()
poCheck, _ := oc.AsAdmin().Run("get").Args("pod", "-n", "openshift-monitoring").Output()
if !strings.Contains(cmCheck, "telemeter") && !strings.Contains(poCheck, "telemeter") {
g.Skip("This case can not execute on a disconnected cluster!")
}
exutil.By("create example PrometheusRule under user namespace")
oc.SetupProject()
ns1 := oc.Namespace()
createResourceFromYaml(oc, ns1, exampleAlert)
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("check the user alerts TestAlert1 and in-cluster Watchdog alerts are shown in \"thanos-querier\" API")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertname="TestAlert1"}'`, token, `TestAlert1`, 3*platformLoadTime)
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertname="Watchdog"}'`, token, `Watchdog`, 3*platformLoadTime)
exutil.By("check the alerts are also sent to external alertmanager, include the in-cluster and user project alerts")
queryFromPod(oc, `http://alertmanager-operated.openshift-user-workload-monitoring.svc:9093/api/v2/alerts?filter={alertname="TestAlert1"}`, token, "openshift-user-workload-monitoring", "thanos-ruler-user-workload-0", "thanos-ruler", "TestAlert1", 3*uwmLoadTime)
queryFromPod(oc, `http://alertmanager-operated.openshift-user-workload-monitoring.svc:9093/api/v2/alerts?filter={alertname="Watchdog"}`, token, "openshift-user-workload-monitoring", "thanos-ruler-user-workload-0", "thanos-ruler", "Watchdog", 3*uwmLoadTime)
})
// author: [email protected]
g.It("Author:tagao-ConnectedOnly-Medium-44815-Configure containers to honor the global tlsSecurityProfile", func() {
exutil.By("get global tlsSecurityProfile")
// % oc get kubeapiservers.operator.openshift.io cluster -o jsonpath='{.spec.observedConfig.servingInfo.cipherSuites}'
cipherSuites, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("kubeapiservers.operator.openshift.io", "cluster", "-ojsonpath={.spec.observedConfig.servingInfo.cipherSuites}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
cipherSuitesFormat := strings.ReplaceAll(cipherSuites, "\"", "")
cipherSuitesFormat = strings.ReplaceAll(cipherSuitesFormat, "[", "")
cipherSuitesFormat = strings.ReplaceAll(cipherSuitesFormat, "]", "")
e2e.Logf("cipherSuites: %s", cipherSuitesFormat)
// % oc get kubeapiservers.operator.openshift.io cluster -o jsonpath='{.spec.observedConfig.servingInfo.minTLSVersion}'
minTLSVersion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("kubeapiservers.operator.openshift.io", "cluster", "-ojsonpath={.spec.observedConfig.servingInfo.minTLSVersion}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check tls-cipher-suites and tls-min-version for metrics-server under openshift-monitoring")
// % oc -n openshift-monitoring get deploy metrics-server -ojsonpath='{.spec.template.spec.containers[?(@tls-cipher-suites=)].args}'
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deploy", "metrics-server", "-ojsonpath={.spec.template.spec.containers[?(@tls-cipher-suites=)].args}", "-n", "openshift-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if !strings.Contains(output, cipherSuitesFormat) {
e2e.Failf("tls-cipher-suites is different from global setting! %s", output)
}
if !strings.Contains(output, minTLSVersion) {
e2e.Failf("tls-min-version is different from global setting! %s", output)
}
exutil.By("check tls-cipher-suites and tls-min-version for all pods which use kube-rbac-proxy container under openshift-monitoring/openshift-user-workload-monitoring")
//oc get pod -l app.kubernetes.io/name=alertmanager -n openshift-monitoring
alertmanagerPodNames, err := exutil.GetAllPodsWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/name=alertmanager")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range alertmanagerPodNames {
cmd := "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy\")].args}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, cipherSuitesFormat, true)
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, minTLSVersion, true)
cmd = "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy-metric\")].args}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, cipherSuitesFormat, true)
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, minTLSVersion, true)
}
//oc get pod -l app.kubernetes.io/name=node-exporter -n openshift-monitoring
nePodNames, err := exutil.GetAllPodsWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/name=node-exporter")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range nePodNames {
cmd := "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy\")].args}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, cipherSuitesFormat, true)
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, minTLSVersion, true)
}
//oc get pod -l app.kubernetes.io/name=kube-state-metrics -n openshift-monitoring
ksmPodNames, err := exutil.GetAllPodsWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/name=kube-state-metrics")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range ksmPodNames {
cmd := "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy-main\")].args}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, cipherSuitesFormat, true)
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, minTLSVersion, true)
cmd = "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy-self\")].args}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, cipherSuitesFormat, true)
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, minTLSVersion, true)
}
//oc get pod -l app.kubernetes.io/name=openshift-state-metrics -n openshift-monitoring
osmPodNames, err := exutil.GetAllPodsWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/name=openshift-state-metrics")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range osmPodNames {
cmd := "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy-main\")].args}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, cipherSuitesFormat, true)
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, minTLSVersion, true)
cmd = "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy-self\")].args}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, cipherSuitesFormat, true)
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, minTLSVersion, true)
}
//oc get pod -l app.kubernetes.io/name=prometheus -n openshift-monitoring
pk8sPodNames, err := exutil.GetAllPodsWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/name=prometheus")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range pk8sPodNames {
cmd := "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy\")].args}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, cipherSuitesFormat, true)
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, minTLSVersion, true)
cmd = "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy-thanos\")].args}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, cipherSuitesFormat, true)
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, minTLSVersion, true)
}
//oc get pod -l app.kubernetes.io/name=prometheus-operator -n openshift-monitoring
poPodNames, err := exutil.GetAllPodsWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/name=prometheus-operator")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range poPodNames {
cmd := "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy\")].args}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, cipherSuitesFormat, true)
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, minTLSVersion, true)
}
//oc get pod -l app.kubernetes.io/name=telemeter-client -n openshift-monitoring
tcPodNames, err := exutil.GetAllPodsWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/name=telemeter-client")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range tcPodNames {
cmd := "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy\")].args}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, cipherSuitesFormat, true)
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, minTLSVersion, true)
}
//oc get pod -l app.kubernetes.io/name=thanos-query -n openshift-monitoring
tqPodNames, err := exutil.GetAllPodsWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/name=thanos-query")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range tqPodNames {
cmd := "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy\")].args}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, cipherSuitesFormat, true)
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, minTLSVersion, true)
cmd = "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy-rules\")].args}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, cipherSuitesFormat, true)
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, minTLSVersion, true)
cmd = "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy-metrics\")].args}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, cipherSuitesFormat, true)
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, minTLSVersion, true)
}
//oc get pod -l app.kubernetes.io/name=prometheus-operator -n openshift-user-workload-monitoring
UWMpoPodNames, err := exutil.GetAllPodsWithLabel(oc, "openshift-user-workload-monitoring", "app.kubernetes.io/name=prometheus-operator")
// `UWMpoPodNames` should only have one value; otherwise it means prometheus-operator pods are still being deleted
e2e.Logf("UWMpoPodNames: %v", UWMpoPodNames)
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range UWMpoPodNames {
cmd := "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy\")].args}"
checkYamlconfig(oc, "openshift-user-workload-monitoring", "pod", pod, cmd, cipherSuitesFormat, true)
checkYamlconfig(oc, "openshift-user-workload-monitoring", "pod", pod, cmd, minTLSVersion, true)
}
//oc get pod -l app.kubernetes.io/instance=user-workload -n openshift-user-workload-monitoring
UWMPodNames, err := exutil.GetAllPodsWithLabel(oc, "openshift-user-workload-monitoring", "app.kubernetes.io/instance=user-workload")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range UWMPodNames {
// Multiple kube-rbac-* containers run under this label, so use a fuzzy query
cmd := "-ojsonpath={.spec.containers[?(@tls-cipher-suites)].args}"
checkYamlconfig(oc, "openshift-user-workload-monitoring", "pod", pod, cmd, cipherSuitesFormat, true)
checkYamlconfig(oc, "openshift-user-workload-monitoring", "pod", pod, cmd, minTLSVersion, true)
}
})
// author: [email protected]
g.It("Author:tagao-LEVEL0-Medium-68237-Add the trusted CA bundle in the Prometheus user workload monitoring pods", func() {
exutil.By("confirm UWM pod is ready")
exutil.AssertPodToBeReady(oc, "prometheus-user-workload-0", "openshift-user-workload-monitoring")
exutil.By("check configmap under namespace: openshift-user-workload-monitoring")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm", "-n", "openshift-user-workload-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("prometheus-user-workload-trusted-ca-bundle"))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check the trusted CA bundle is applied to the pod")
PodNames, err := exutil.GetAllPodsWithLabel(oc, "openshift-user-workload-monitoring", "app.kubernetes.io/name=prometheus")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range PodNames {
cmd := "-ojsonpath={.spec.containers[?(@.name==\"prometheus\")].volumeMounts}"
checkYamlconfig(oc, "openshift-user-workload-monitoring", "pod", pod, cmd, "prometheus-user-workload-trusted-ca-bundle", true)
cmd = "-ojsonpath={.spec.volumes[?(@.name==\"prometheus-user-workload-trusted-ca-bundle\")]}"
checkYamlconfig(oc, "openshift-user-workload-monitoring", "pod", pod, cmd, "prometheus-user-workload-trusted-ca-bundle", true)
}
})
//author: [email protected]
g.It("Author:tagao-Medium-69084-user workLoad components failures leading to CMO degradation/unavailability should be easy to identify [Slow] [Disruptive]", func() {
var (
UserWorkloadTasksFailed = filepath.Join(monitoringBaseDir, "UserWorkloadTasksFailed.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("trigger UserWorkloadTasksFailed")
createResourceFromYaml(oc, "openshift-user-workload-monitoring", UserWorkloadTasksFailed)
exutil.By("check logs in CMO should see UserWorkloadTasksFailed")
CMOPodName, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-monitoring", "-l", "app.kubernetes.io/name=cluster-monitoring-operator", "-ojsonpath={.items[].metadata.name}").Output()
exutil.WaitAndGetSpecificPodLogs(oc, "openshift-monitoring", "cluster-monitoring-operator", CMOPodName, "UserWorkloadTasksFailed")
})
//author: [email protected]
g.It("Author:tagao-Medium-73112-replace OAuth proxy for Thanos Ruler", func() {
exutil.By("check new secret thanos-user-workload-kube-rbac-proxy-web added")
exutil.AssertPodToBeReady(oc, "prometheus-user-workload-0", "openshift-user-workload-monitoring")
checkSecret, err := oc.AsAdmin().Run("get").Args("secret", "thanos-user-workload-kube-rbac-proxy-web", "-n", "openshift-user-workload-monitoring").Output()
o.Expect(checkSecret).NotTo(o.ContainSubstring("not found"))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check old secret thanos-ruler-oauth-cookie removed")
checkSecret, _ = oc.AsAdmin().Run("get").Args("secret", "thanos-ruler-oauth-cookie", "-n", "openshift-user-workload-monitoring").Output()
o.Expect(checkSecret).To(o.ContainSubstring("not found"))
exutil.By("check thanos-ruler sa, `annotations` should be removed")
checkSa, err := oc.AsAdmin().Run("get").Args("sa", "thanos-ruler", "-n", "openshift-user-workload-monitoring", "-ojsonpath={.metadata.annotations}").Output()
o.Expect(checkSa).NotTo(o.ContainSubstring("Route"))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check thanos-ruler-user-workload pods, thanos-ruler-proxy container is removed")
checkPO, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "thanos-ruler-user-workload-0", "-ojsonpath={.spec.containers[*].name}", "-n", "openshift-user-workload-monitoring").Output()
o.Expect(checkPO).NotTo(o.ContainSubstring("thanos-ruler-proxy"))
o.Expect(checkPO).To(o.ContainSubstring("kube-rbac-proxy-web"))
exutil.By("check ThanosRuler, new configs added")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("ThanosRuler", "user-workload", "-n", "openshift-user-workload-monitoring", "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy-web\")].args}").Output()
o.Expect(output).To(o.ContainSubstring("config-file=/etc/kube-rbac-proxy/config.yaml"))
o.Expect(output).To(o.ContainSubstring("tls-cert-file=/etc/tls/private/tls.crt"))
o.Expect(output).To(o.ContainSubstring("tls-private-key-file=/etc/tls/private/tls.key"))
})
//author: [email protected]
g.It("Author:tagao-High-73213-Enable controller id for CMO Prometheus resources [Serial]", func() {
var (
uwmEnableAlertmanager = filepath.Join(monitoringBaseDir, "uwm-enableAlertmanager.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("enable alertmanager for uwm")
createResourceFromYaml(oc, "openshift-user-workload-monitoring", uwmEnableAlertmanager)
exutil.By("wait for all pods ready")
exutil.AssertPodToBeReady(oc, "prometheus-user-workload-0", "openshift-user-workload-monitoring")
exutil.AssertPodToBeReady(oc, "alertmanager-user-workload-0", "openshift-user-workload-monitoring")
exutil.AssertPodToBeReady(oc, "thanos-ruler-user-workload-0", "openshift-user-workload-monitoring")
exutil.By("check alertmanager controller-id")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("alertmanager", "main", "-n", "openshift-monitoring", "-ojsonpath={.metadata.annotations}").Output()
o.Expect(output).To(o.ContainSubstring(`"operator.prometheus.io/controller-id":"openshift-monitoring/prometheus-operator"`))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check UWM alertmanager controller-id")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("alertmanager", "user-workload", "-n", "openshift-user-workload-monitoring", "-ojsonpath={.metadata.annotations}").Output()
o.Expect(output).To(o.ContainSubstring(`"operator.prometheus.io/controller-id":"openshift-user-workload-monitoring/prometheus-operator"`))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check prometheus k8s controller-id")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("prometheus", "k8s", "-n", "openshift-monitoring", "-ojsonpath={.metadata.annotations}").Output()
o.Expect(output).To(o.ContainSubstring(`"operator.prometheus.io/controller-id":"openshift-monitoring/prometheus-operator"`))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check prometheus-operator deployment controller-id")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deploy", "prometheus-operator", "-n", "openshift-monitoring", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"prometheus-operator\")].args}").Output()
o.Expect(output).To(o.ContainSubstring(`"--controller-id=openshift-monitoring/prometheus-operator"`))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check UWM prometheus-operator deployment controller-id")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deploy", "prometheus-operator", "-n", "openshift-user-workload-monitoring", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"prometheus-operator\")].args}").Output()
o.Expect(output).To(o.ContainSubstring(`"--controller-id=openshift-user-workload-monitoring/prometheus-operator"`))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check UWM prometheus user-workload controller-id")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("prometheus", "user-workload", "-n", "openshift-user-workload-monitoring", "-ojsonpath={.metadata.annotations}").Output()
o.Expect(output).To(o.ContainSubstring(`"operator.prometheus.io/controller-id":"openshift-user-workload-monitoring/prometheus-operator"`))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check ThanosRuler user-workload controller-id")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("ThanosRuler", "user-workload", "-n", "openshift-user-workload-monitoring", "-ojsonpath={.metadata.annotations}").Output()
o.Expect(output).To(o.ContainSubstring(`"operator.prometheus.io/controller-id":"openshift-user-workload-monitoring/prometheus-operator"`))
o.Expect(err).NotTo(o.HaveOccurred())
})
//author: [email protected]
g.It("Author:juzhao-Low-73684-UWM statefulset should not lack serviceName", func() {
exutil.By("check spec.serviceName for UWM statefulset")
cmd := "-ojsonpath={.spec.serviceName}}"
checkYamlconfig(oc, "openshift-user-workload-monitoring", "statefulset", "prometheus-user-workload", cmd, "prometheus-operated", true)
checkYamlconfig(oc, "openshift-user-workload-monitoring", "statefulset", "thanos-ruler-user-workload", cmd, "thanos-ruler-operated", true)
})
//author: [email protected]
g.It("Author:tagao-Medium-73734-Add ownership annotation for certificates [Serial]", func() {
var (
uwmEnableAlertmanager = filepath.Join(monitoringBaseDir, "uwm-enableAlertmanager.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("enable alertmanager for uwm")
createResourceFromYaml(oc, "openshift-user-workload-monitoring", uwmEnableAlertmanager)
exutil.By("check annotations added to the CM under the namespace openshift-monitoring")
cmd := "-ojsonpath={.metadata.annotations}"
checkYamlconfig(oc, "openshift-monitoring", "cm", "alertmanager-trusted-ca-bundle", cmd, `"openshift.io/owning-component":"Monitoring"`, true)
checkYamlconfig(oc, "openshift-monitoring", "cm", "kubelet-serving-ca-bundle", cmd, `"openshift.io/owning-component":"Monitoring"`, true)
checkYamlconfig(oc, "openshift-monitoring", "cm", "prometheus-trusted-ca-bundle", cmd, `"openshift.io/owning-component":"Monitoring"`, true)
telemeterPod, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-l", "app.kubernetes.io/name=telemeter-client", "-n", "openshift-monitoring").Output()
if strings.Contains(telemeterPod, "telemeter-client") {
checkYamlconfig(oc, "openshift-monitoring", "cm", "telemeter-trusted-ca-bundle", cmd, `"openshift.io/owning-component":"Monitoring"`, true)
}
exutil.By("check annotations added to the CM under the namespace openshift-user-workload-monitoring")
checkYamlconfig(oc, "openshift-user-workload-monitoring", "cm", "prometheus-user-workload-trusted-ca-bundle", cmd, `"openshift.io/owning-component":"Monitoring"`, true)
checkYamlconfig(oc, "openshift-user-workload-monitoring", "cm", "alertmanager-trusted-ca-bundle", cmd, `"openshift.io/owning-component":"Monitoring"`, true)
})
//author: [email protected]
g.It("Author:juzhao-Medium-75489-Set scrape.timestamp tolerance for UWM prometheus", func() {
exutil.By("confirm for UWM prometheus created")
err := wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 180*time.Second, false, func(context.Context) (bool, error) {
prometheus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("prometheus", "user-workload", "-n", "openshift-user-workload-monitoring").Output()
if err != nil || strings.Contains(prometheus, "not found") {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, "UWM prometheus not created")
exutil.By("check for UWM prometheus scrape.timestamp tolerance")
cmd := `-ojsonpath={.spec.additionalArgs[?(@.name=="scrape.timestamp-tolerance")]}`
checkYamlconfig(oc, "openshift-user-workload-monitoring", "prometheus", "user-workload", cmd, `"value":"15ms"`, true)
exutil.By("check settings in UWM prometheus pods")
podNames, err := exutil.GetAllPodsWithLabel(oc, "openshift-user-workload-monitoring", "app.kubernetes.io/name=prometheus")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range podNames {
cmd := "-ojsonpath={.spec.containers[?(@.name==\"prometheus\")].args}"
checkYamlconfig(oc, "openshift-user-workload-monitoring", "pod", pod, cmd, `--scrape.timestamp-tolerance=15ms`, true)
}
})
// author: [email protected]
g.It("Author:tagao-High-75384-cross-namespace rules for user-workload monitoring [Serial]", func() {
var (
example_cross_ns_alert = filepath.Join(monitoringBaseDir, "example_cross_ns_alert.yaml")
disable_uwm_cross_ns_rules = filepath.Join(monitoringBaseDir, "disable_uwm_cross_ns_rules.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of the case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("Create a user-monitoring-shared namespace and deploy PrometheusRule")
oc.SetupProject()
ns := oc.Namespace()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", "ns-monitoring-75384", "--ignore-not-found").Execute()
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("namespace", "ns-monitoring-75384").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
createResourceFromYaml(oc, "ns-monitoring-75384", example_cross_ns_alert)
exutil.By("check namespace have expect label")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ns", ns, "-ojsonpath={.metadata.labels}").Output()
o.Expect(output).To(o.ContainSubstring(`"pod-security.kubernetes.io/enforce":"restricted"`))
o.Expect(err).NotTo(o.HaveOccurred())
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("ns", "ns-monitoring-75384", "-ojsonpath={.metadata.labels}").Output()
o.Expect(output).To(o.ContainSubstring(`"pod-security.kubernetes.io/enforce":"restricted"`))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check metrics")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertname="TestAlert1", namespace="ns-monitoring-75384"}'`, token, `"namespace":"ns-monitoring-75384"`, 2*uwmLoadTime)
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertname="TestAlert1", namespace="`+ns+`"}'`, token, `"namespace":"`+ns+`"`, 2*uwmLoadTime)
exutil.By("disable the feature")
createResourceFromYaml(oc, "openshift-monitoring", disable_uwm_cross_ns_rules)
exutil.By("check the alert should not share across the namespace")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertname="TestAlert1", namespace="`+ns+`"}'`, token, `"result":[]`, 2*uwmLoadTime)
})
})
//author: [email protected]
g.It("Author:tagao-Low-30088-User can not deploy ThanosRuler CRs in user namespaces [Serial]", func() {
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("create namespace as a common user (non-admin)")
oc.SetupProject()
ns := oc.Namespace()
exutil.By("check ThanosRuler can not be created")
currentUser, _ := oc.Run("whoami").Args("").Output()
e2e.Logf("current user is: %v", currentUser)
queryErr := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 60*time.Second, true, func(context.Context) (bool, error) {
permissionCheck, _ := oc.WithoutNamespace().Run("auth").Args("can-i", "create", "thanosrulers", "--as="+currentUser, "-n", ns).Output()
if !strings.Contains(permissionCheck, "yes") {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(queryErr, "permissionCheck failed to contain \"no\"")
})
//author: [email protected]
g.It("Author:tagao-NonPreRelease-Longduration-Medium-49191-Enforce body_size_limit [Serial]", func() {
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("set `enforcedBodySizeLimit` to 0, and check from the k8s pod")
patchAndCheckBodySizeLimit(oc, "0", "0")
exutil.By("set `enforcedBodySizeLimit` to a invalid value, and check from the k8s pod")
patchAndCheckBodySizeLimit(oc, "20MiBPS", "")
exutil.By("set `enforcedBodySizeLimit` to 1MB to trigger PrometheusScrapeBodySizeLimitHit alert, and check from the k8s pod")
patchAndCheckBodySizeLimit(oc, "1MB", "1MB")
exutil.By("check PrometheusScrapeBodySizeLimitHit alert is triggered")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertname="PrometheusScrapeBodySizeLimitHit"}'`, token, "PrometheusScrapeBodySizeLimitHit", 5*uwmLoadTime)
exutil.By("set `enforcedBodySizeLimit` to 40MB, and check from the k8s pod")
patchAndCheckBodySizeLimit(oc, "40MB", "40MB")
exutil.By("check from alert, should not have enforcedBodySizeLimit")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertname="PrometheusScrapeBodySizeLimitHit"}'`, token, `"result":[]`, 5*uwmLoadTime)
exutil.By("set `enforcedBodySizeLimit` to automatic, and check from the k8s pod")
patchAndCheckBodySizeLimit(oc, "automatic", "body_size_limit")
exutil.By("check from alert, should not have enforcedBodySizeLimit")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertname="PrometheusScrapeBodySizeLimitHit"}'`, token, `"result":[]`, 5*uwmLoadTime)
})
//author: [email protected]
g.It("Author:tagao-High-60485-check On/Off switch of netdev Collector in Node Exporter [Serial]", func() {
var (
disableNetdev = filepath.Join(monitoringBaseDir, "disableNetdev.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("check netdev Collector is enabled by default")
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("--collector.netdev"))
exutil.By("check netdev metrics in prometheus k8s pod")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="netdev"}'`, token, `"collector":"netdev"`, uwmLoadTime)
exutil.By("disable netdev in CMO")
createResourceFromYaml(oc, "openshift-monitoring", disableNetdev)
exutil.By("check netdev metrics in prometheus k8s pod again, should not have related metrics")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="netdev"}'`, token, `"result":[]`, 3*uwmLoadTime)
exutil.By("check netdev in daemonset")
output2, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output2).To(o.ContainSubstring("--no-collector.netdev"))
})
//author: [email protected]
g.It("Author:tagao-High-59521-check On/Off switch of cpufreq Collector in Node Exporter [Serial]", func() {
var (
enableCpufreq = filepath.Join(monitoringBaseDir, "enableCpufreq.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("check cpufreq Collector is disabled by default")
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("--no-collector.cpufreq"))
exutil.By("check cpufreq metrics in prometheus k8s pod, should not have related metrics")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="cpufreq"}'`, token, `"result":[]`, uwmLoadTime)
exutil.By("enable cpufreq in CMO")
createResourceFromYaml(oc, "openshift-monitoring", enableCpufreq)
exutil.By("check cpufreq metrics in prometheus k8s pod again")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="cpufreq"}'`, token, `"collector":"cpufreq"`, 3*uwmLoadTime)
exutil.By("check cpufreq in daemonset")
output2, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output2).To(o.ContainSubstring("--collector.cpufreq"))
})
//author: [email protected]
g.It("Author:tagao-High-60480-check On/Off switch of tcpstat Collector in Node Exporter [Serial]", func() {
var (
enableTcpstat = filepath.Join(monitoringBaseDir, "enableTcpstat.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("check tcpstat Collector is disabled by default")
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("--no-collector.tcpstat"))
exutil.By("check tcpstat metrics in prometheus k8s pod, should not have related metrics")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="tcpstat"}'`, token, `"result":[]`, uwmLoadTime)
exutil.By("enable tcpstat in CMO")
createResourceFromYaml(oc, "openshift-monitoring", enableTcpstat)
exutil.By("check tcpstat metrics in prometheus k8s pod again")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="tcpstat"}'`, token, `"collector":"tcpstat"`, 3*uwmLoadTime)
exutil.By("check tcpstat in daemonset")
output2, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output2).To(o.ContainSubstring("--collector.tcpstat"))
})
//author: [email protected]
g.It("Author:tagao-High-60582-check On/Off switch of buddyinfo Collector in Node Exporter [Serial]", func() {
var (
enableBuddyinfo = filepath.Join(monitoringBaseDir, "enableBuddyinfo.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("check buddyinfo Collector is disabled by default")
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("--no-collector.buddyinfo"))
exutil.By("check buddyinfo metrics in prometheus k8s pod, should not have related metrics")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="buddyinfo"}'`, token, `"result":[]`, uwmLoadTime)
exutil.By("enable buddyinfo in CMO")
createResourceFromYaml(oc, "openshift-monitoring", enableBuddyinfo)
exutil.By("check buddyinfo metrics in prometheus k8s pod again")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="buddyinfo"}'`, token, `"collector":"buddyinfo"`, 3*uwmLoadTime)
exutil.By("check buddyinfo in daemonset")
output2, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output2).To(o.ContainSubstring("--collector.buddyinfo"))
})
//author: [email protected]
g.It("Author:juzhao-Medium-59986-Allow to configure secrets in alertmanager component [Serial]", func() {
var (
alertmanagerSecret = filepath.Join(monitoringBaseDir, "alertmanager-secret.yaml")
alertmanagerSecretCM = filepath.Join(monitoringBaseDir, "alertmanager-secret-cm.yaml")
alertmanagerSecretUwmCM = filepath.Join(monitoringBaseDir, "alertmanager-secret-uwm-cm.yaml")
)
exutil.By("delete secrets/user-workload-monitoring-config/cluster-monitoring-config configmap at the end of a serial case")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("secret", "test-secret", "-n", "openshift-monitoring").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("secret", "slack-api-token", "-n", "openshift-monitoring").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("secret", "test-secret", "-n", "openshift-user-workload-monitoring").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("secret", "slack-api-token", "-n", "openshift-user-workload-monitoring").Execute()
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("create alertmanager secret in openshift-monitoring")
createResourceFromYaml(oc, "openshift-monitoring", alertmanagerSecret)
exutil.By("enabled UWM and configure alertmanager secret setting in cluster-monitoring-config configmap")
createResourceFromYaml(oc, "openshift-monitoring", alertmanagerSecretCM)
exutil.By("check if the secrets are mounted to alertmanager pod")
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
checkConfigInPod(oc, "openshift-monitoring", "alertmanager-main-0", "alertmanager", "ls /etc/alertmanager/secrets/", "test-secret")
checkConfigInPod(oc, "openshift-monitoring", "alertmanager-main-0", "alertmanager", "ls /etc/alertmanager/secrets/", "slack-api-token")
exutil.By("create the same alertmanager secret in openshift-user-workload-monitoring")
createResourceFromYaml(oc, "openshift-user-workload-monitoring", alertmanagerSecret)
exutil.By("configure alertmanager secret setting in user-workload-monitoring-config configmap")
createResourceFromYaml(oc, "openshift-user-workload-monitoring", alertmanagerSecretUwmCM)
exutil.By("check if the secrets are mounted to UWM alertmanager pod")
exutil.AssertAllPodsToBeReady(oc, "openshift-user-workload-monitoring")
checkConfigInPod(oc, "openshift-user-workload-monitoring", "alertmanager-user-workload-0", "alertmanager", "ls /etc/alertmanager/secrets/", "test-secret")
checkConfigInPod(oc, "openshift-user-workload-monitoring", "alertmanager-user-workload-0", "alertmanager", "ls /etc/alertmanager/secrets/", "slack-api-token")
})
//author: [email protected]
g.It("Author:juzhao-Medium-60532-TechPreview feature is not enabled and collectionProfile is set to valid value [Serial]", func() {
var (
collectionProfileminimal = filepath.Join(monitoringBaseDir, "collectionProfile_minimal.yaml")
)
exutil.By("delete user-workload-monitoring-config/cluster-monitoring-config configmap at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("skip the case in TechPreview feature enabled cluster")
featureSet, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("FeatureGate/cluster", "-ojsonpath={.spec}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("featureSet is: %s", featureSet)
if featureSet != "{}" && strings.Contains(featureSet, "TechPreviewNoUpgrade") {
g.Skip("This case is not suitable for TechPreview enabled cluster!")
}
exutil.By("set collectionProfile to minimal in cluster-monitoring-config configmap")
createResourceFromYaml(oc, "openshift-monitoring", collectionProfileminimal)
exutil.By("should see error in CMO logs which indicate collectionProfiles is a TechPreview feature")
CMOPodName, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-monitoring", "-l", "app.kubernetes.io/name=cluster-monitoring-operator", "-ojsonpath={.items[].metadata.name}").Output()
checkLogsInContainer(oc, "openshift-monitoring", CMOPodName, "cluster-monitoring-operator", "collectionProfiles is currently a TechPreview feature")
})
//author: [email protected]
g.It("Author:tagao-Low-60534-check gomaxprocs setting of Node Exporter in CMO [Serial]", func() {
var (
setGomaxprocsTo1 = filepath.Join(monitoringBaseDir, "setGomaxprocsTo1.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("check default gomaxprocs value is 0")
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset", "node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("--runtime.gomaxprocs=0"))
exutil.By("set gomaxprocs value to 1")
createResourceFromYaml(oc, "openshift-monitoring", setGomaxprocsTo1)
exutil.By("check gomaxprocs value in daemonset")
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
cmd := "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}"
checkYamlconfig(oc, "openshift-monitoring", "daemonset", "node-exporter", cmd, "--runtime.gomaxprocs=1", true)
})
//author: [email protected]
g.It("Author:tagao-High-60486-check On/Off switch of netclass Collector and netlink backend in Node Exporter [Serial]", func() {
var (
disableNetclass = filepath.Join(monitoringBaseDir, "disableNetclass.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("check netclass Collector is enabled by default, so as netlink")
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
//oc -n openshift-monitoring get daemonset.apps/node-exporter -ojsonpath='{.spec.template.spec.containers[?(@.name=="node-exporter")].args}'
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("--collector.netclass"))
o.Expect(output).To(o.ContainSubstring("--collector.netclass.netlink"))
exutil.By("check netclass metrics in prometheus k8s pod")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="netclass"}'`, token, `"collector":"netclass"`, uwmLoadTime)
exutil.By("disable netclass in CMO")
createResourceFromYaml(oc, "openshift-monitoring", disableNetclass)
exutil.By("check netclass metrics in prometheus k8s pod again, should not have related metrics")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="netclass"}'`, token, `"result":[]`, 3*uwmLoadTime)
exutil.By("check netclass/netlink in daemonset")
output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("--no-collector.netclass"))
o.Expect(output).NotTo(o.ContainSubstring("--collector.netclass.netlink"))
})
//author: [email protected]
g.It("Author:tagao-High-63659-check On/Off switch of ksmd Collector in Node Exporter [Serial]", func() {
var (
enableKsmd = filepath.Join(monitoringBaseDir, "enableKsmd.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("check ksmd Collector is disabled by default")
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("--no-collector.ksmd"))
exutil.By("check ksmd metrics in prometheus k8s pod, should not have related metrics")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="ksmd"}'`, token, `"result":[]`, uwmLoadTime)
exutil.By("enable ksmd in CMO")
createResourceFromYaml(oc, "openshift-monitoring", enableKsmd)
exutil.By("check ksmd metrics in prometheus k8s pod again")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="ksmd"}'`, token, `"collector":"ksmd"`, 3*uwmLoadTime)
exutil.By("check ksmd in daemonset")
output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("--collector.ksmd"))
})
// author: [email protected]
g.It("Author:tagao-LEVEL0-High-64537-CMO deploys monitoring console-plugin [Serial]", func() {
var (
monitoringPluginConfig = filepath.Join(monitoringBaseDir, "monitoringPlugin-config.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("skip the case if console CO is absent")
checkCO, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if !strings.Contains(checkCO, "console") {
g.Skip("This case is not executable when console CO is absent")
}
exutil.By("apply monitoringPlugin config and check config applied")
createResourceFromYaml(oc, "openshift-monitoring", monitoringPluginConfig)
//check new config takes effect
cmd := "-ojsonpath={.spec.template.spec.containers[].resources}"
checkYamlconfig(oc, "openshift-monitoring", "deployment", "monitoring-plugin", cmd, `{"limits":{"cpu":"30m","memory":"120Mi"},"requests":{"cpu":"15m","memory":"60Mi"}}`, true)
exutil.By("check monitoring-plugin ConsolePlugin/PodDisruptionBudget/ServiceAccount/Service are exist")
resourceNames := []string{"ConsolePlugin", "ServiceAccount", "Service"}
for _, resource := range resourceNames {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(resource, "monitoring-plugin", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("monitoring-plugin"))
o.Expect(err).NotTo(o.HaveOccurred())
}
//SNO clusters do not have a PDB under openshift-monitoring
//hypershift-hosted clusters do not have master nodes
checkPodDisruptionBudgetIfNotSNO(oc)
exutil.By("check monitoring-plugin pods are ready")
getReadyPodsWithLabels(oc, "openshift-monitoring", "app.kubernetes.io/component=monitoring-plugin")
exutil.By("get monitoring-plugin pod name")
monitoringPluginPodNames, err := getAllRunningPodsWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/component=monitoring-plugin")
o.Expect(err).NotTo(o.HaveOccurred())
getDeploymentReplicas(oc, "openshift-monitoring", "monitoring-plugin")
waitForPodsToMatchReplicas(oc, "openshift-monitoring", "monitoring-plugin", "app.kubernetes.io/component=monitoring-plugin")
exutil.By("check monitoring-plugin pod config")
e2e.Logf("monitoringPluginPodNames: %v", monitoringPluginPodNames)
for _, pod := range monitoringPluginPodNames {
exutil.AssertPodToBeReady(oc, pod, "openshift-monitoring")
cmd := "-ojsonpath={.spec.nodeSelector}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, `{"node-role.kubernetes.io/worker":""}`, true)
cmd = "-ojsonpath={.spec.topologySpreadConstraints}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, `{"maxSkew":1,"topologyKey":"kubernetes.io/hostname","whenUnsatisfiable":"DoNotSchedule"}`, true)
cmd = "-ojsonpath={.spec.tolerations}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, `{"operator":"Exists"}`, true)
cmd = "-ojsonpath={.spec.containers[].resources}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, `"requests":{"cpu":"15m","memory":"60Mi"}`, true)
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, `"limits":{"cpu":"30m","memory":"120Mi"}`, true)
}
})
// author: [email protected]
g.It("Author:tagao-High-63657-check On/Off switch of systemd Collector in Node Exporter [Serial]", func() {
var (
enableSystemdUnits = filepath.Join(monitoringBaseDir, "enableSystemdUnits.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("check systemd Collector is disabled by default")
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("--no-collector.systemd"))
exutil.By("check systemd metrics in prometheus k8s pod, should not have related metrics")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="systemd"}'`, token, `"result":[]`, uwmLoadTime)
exutil.By("enable systemd and units in CMO")
createResourceFromYaml(oc, "openshift-monitoring", enableSystemdUnits)
exutil.By("check systemd related metrics in prometheus k8s pod again")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="systemd"}'`, token, `"collector":"systemd"`, 3*uwmLoadTime)
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_systemd_system_running'`, token, `"node_systemd_system_running"`, 3*uwmLoadTime)
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_systemd_timer_last_trigger_seconds'`, token, `"node_systemd_timer_last_trigger_seconds"`, 3*uwmLoadTime)
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_systemd_units'`, token, `"node_systemd_units"`, 3*uwmLoadTime)
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_systemd_version'`, token, `"node_systemd_version"`, 3*uwmLoadTime)
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_systemd_unit_state'`, token, `"node_systemd_unit_state"`, 3*uwmLoadTime)
exutil.By("check systemd in daemonset")
output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("--collector.systemd"))
o.Expect(output).To(o.ContainSubstring("--collector.systemd.unit-include=^(network.+|nss.+|logrotate.timer)$"))
})
// author: [email protected]
g.It("Author:tagao-High-63658-check On/Off switch of mountstats Collector in Node Exporter [Serial]", func() {
var (
enableMountstats = filepath.Join(monitoringBaseDir, "enableMountstats.yaml")
enableMountstatsNFS = filepath.Join(monitoringBaseDir, "enableMountstats_nfs.yaml")
)
exutil.By("delete uwm-config/cm-config and pvcs at the end of the case")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pvc", "-l", "app.kubernetes.io/name=prometheus", "-n", "openshift-monitoring").Execute()
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("check mountstats collector is disabled by default")
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("--no-collector.mountstats"))
exutil.By("check mountstats metrics in prometheus k8s pod, should not have related metrics")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="mountstats"}'`, token, `"result":[]`, uwmLoadTime)
exutil.By("enable mountstats in CMO")
createResourceFromYaml(oc, "openshift-monitoring", enableMountstats)
exutil.By("check mountstats metrics in prometheus k8s pod again")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="mountstats"}'`, token, `"collector":"mountstats"`, 3*uwmLoadTime)
exutil.By("check mountstats in daemonset")
output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("--collector.mountstats"))
exutil.By("check nfs metrics if need")
output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("sc").Output()
if strings.Contains(output, "nfs") {
createResourceFromYaml(oc, "openshift-monitoring", enableMountstatsNFS)
exutil.AssertPodToBeReady(oc, "prometheus-k8s-0", "openshift-monitoring")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_mountstats_nfs_read_bytes_total'`, token, `"__name__":"node_mountstats_nfs_read_bytes_total"`, 3*uwmLoadTime)
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_mountstats_nfs_write_bytes_total'`, token, `"__name__":"node_mountstats_nfs_write_bytes_total"`, 3*uwmLoadTime)
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_mountstats_nfs_operations_requests_total'`, token, `"__name__":"node_mountstats_nfs_operations_requests_total"`, 3*uwmLoadTime)
} else {
e2e.Logf("no need to check nfs metrics for this env")
}
})
// author: [email protected]
g.It("Author:tagao-Medium-64868-netclass/netdev device configuration [Serial]", func() {
var (
ignoredNetworkDevices = filepath.Join(monitoringBaseDir, "ignoredNetworkDevices-lo.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("check netclass/netdev device configuration")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("--collector.netclass.ignored-devices=^(veth.*|[a-f0-9]{15}|enP.*|ovn-k8s-mp[0-9]*|br-ex|br-int|br-ext|br[0-9]*|tun[0-9]*|cali[a-f0-9]*)$"))
o.Expect(output).To(o.ContainSubstring("--collector.netdev.device-exclude=^(veth.*|[a-f0-9]{15}|enP.*|ovn-k8s-mp[0-9]*|br-ex|br-int|br-ext|br[0-9]*|tun[0-9]*|cali[a-f0-9]*)$"))
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("check lo devices exist, and able to see related metrics")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=group by(device) (node_network_info)'`, token, `"device":"lo"`, uwmLoadTime)
exutil.By("modify cm to ignore lo devices")
createResourceFromYaml(oc, "openshift-monitoring", ignoredNetworkDevices)
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
exutil.By("check metrics again, should not see lo device metrics")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_network_info{device="lo"}'`, token, `"result":[]`, 3*uwmLoadTime)
exutil.By("check netclass/netdev device configuration, no lo devices")
output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("--collector.netclass.ignored-devices=^(lo)$"))
o.Expect(output).To(o.ContainSubstring("--collector.netdev.device-exclude=^(lo)$"))
exutil.By("modify cm to ignore all devices")
// % oc -n openshift-monitoring patch cm cluster-monitoring-config -p '{"data": {"config.yaml": "nodeExporter:\n ignoredNetworkDevices: [.*]"}}' --type=merge
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("cm", "cluster-monitoring-config", "-p", `{"data": {"config.yaml": "nodeExporter:\n ignoredNetworkDevices: [.*]"}}`, "--type=merge", "-n", "openshift-monitoring").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check metrics again, should not see all device metrics")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=group by(device) (node_network_info)'`, token, `"result":[]`, 3*uwmLoadTime)
exutil.By("check netclass/netdev device configuration again")
output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("--collector.netclass.ignored-devices=^(.*)$"))
o.Expect(output).To(o.ContainSubstring("--collector.netdev.device-exclude=^(.*)$"))
})
// author: [email protected]
g.It("Author:tagao-LEVEL0-Medium-64296-disable CORS headers on Thanos querier [Serial]", func() {
var (
enableCORS = filepath.Join(monitoringBaseDir, "enableCORS.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("check the default enableCORS value is false")
// oc -n openshift-monitoring get deployments.apps thanos-querier -o jsonpath='{.spec.template.spec.containers[?(@.name=="thanos-query")].args}' |jq
thanosQueryArgs, getArgsErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("deployments/thanos-querier", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"thanos-query\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(getArgsErr).NotTo(o.HaveOccurred(), "Failed to get thanos-query container args definition")
o.Expect(thanosQueryArgs).To(o.ContainSubstring("--web.disable-cors"))
exutil.By("set enableCORS as true")
createResourceFromYaml(oc, "openshift-monitoring", enableCORS)
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
exutil.By("check the config again")
cmd := "-ojsonpath={.spec.template.spec.containers[?(@.name==\"thanos-query\")].args}"
checkYamlconfig(oc, "openshift-monitoring", "deployments", "thanos-querier", cmd, `--web.disable-cors`, false)
})
//author: [email protected]
g.It("Author:tagao-Medium-43106-disable Alertmanager deployment[Serial]", func() {
var (
disableAlertmanager = filepath.Join(monitoringBaseDir, "disableAlertmanager.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("disable alertmanager in CMO config")
createResourceFromYaml(oc, "openshift-monitoring", disableAlertmanager)
exutil.AssertAllPodsToBeReady(oc, "openshift-user-workload-monitoring")
// this step gives CMO time to remove the alertmanager resources
exutil.By("confirm alertmanager is down")
checkPodDeleted(oc, "openshift-monitoring", "alertmanager=main", "alertmanager")
exutil.By("check alertmanager resources are removed")
err := wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 90*time.Second, false, func(context.Context) (bool, error) {
resourceNames := []string{"route", "servicemonitor", "serviceaccounts", "statefulset", "services", "endpoints", "alertmanagers", "prometheusrules", "clusterrolebindings", "roles"}
for _, resource := range resourceNames {
output, outputErr := oc.AsAdmin().WithoutNamespace().Run("get").Args(resource, "-n", "openshift-monitoring").Output()
if outputErr != nil || strings.Contains(output, "alertmanager") {
return false, nil
}
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, "one or more alertmanager resources not removed yet")
exutil.By("check on clusterroles")
clusterroles, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterroles", "-l", "app.kubernetes.io/part-of=openshift-monitoring").Output()
o.Expect(clusterroles).NotTo(o.ContainSubstring("alertmanager"))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check on configmaps")
checkCM, _ := exec.Command("bash", "-c", `oc -n openshift-monitoring get cm -l app.kubernetes.io/managed-by=cluster-monitoring-operator | grep alertmanager`).Output()
e2e.Logf("check result is: %v", checkCM)
o.Expect(checkCM).NotTo(o.ContainSubstring("alertmanager-trusted-ca-bundle"))
exutil.By("check on rolebindings")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("rolebindings", "-n", "openshift-monitoring").Output()
o.Expect(output).NotTo(o.ContainSubstring("alertmanager-prometheusk8s"))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("check Watchdog alert exist")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertstate="firing",alertname="Watchdog"}'`, token, `"alertname":"Watchdog"`, uwmLoadTime)
})
// author: [email protected]
g.It("Author:juzhao-Medium-66736-add option to specify resource requests and limits for components [Serial]", func() {
var (
clusterResources = filepath.Join(monitoringBaseDir, "cluster_resources.yaml")
uwmResources = filepath.Join(monitoringBaseDir, "uwm_resources.yaml")
)
exutil.By("delete user-workload-monitoring-config/cluster-monitoring-config configmap at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
createResourceFromYaml(oc, "openshift-monitoring", clusterResources)
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
exutil.By("by default there is not resources.limits setting for the components, check the result for kube_pod_container_resource_limits of node-exporter pod to see if the setting loaded to components, same for other components")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=kube_pod_container_resource_limits{container="node-exporter",namespace="openshift-monitoring"}'`, token, `"pod":"node-exporter-`, 3*uwmLoadTime)
exutil.By("check the resources.requests and resources.limits setting loaded to node-exporter daemonset")
// oc -n openshift-monitoring get daemonset node-exporter -o jsonpath='{.spec.template.spec.containers[?(@.name=="node-exporter")].resources.requests}'
result, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].resources.requests}", "-n", "openshift-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get node-exporter container resources.requests setting")
o.Expect(result).To(o.ContainSubstring(`"cpu":"10m","memory":"40Mi"`))
// oc -n openshift-monitoring get daemonset node-exporter -o jsonpath='{.spec.template.spec.containers[?(@.name=="node-exporter")].resources.limits}'
result, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].resources.limits}", "-n", "openshift-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get node-exporter container resources.limits setting")
o.Expect(result).To(o.ContainSubstring(`"cpu":"20m","memory":"100Mi"`))
exutil.By("check the resources.requests and resources.limits take effect for kube-state-metrics")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=kube_pod_container_resource_limits{container="kube-state-metrics",namespace="openshift-monitoring"}'`, token, `"pod":"kube-state-metrics-`, 3*uwmLoadTime)
result, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment/kube-state-metrics", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"kube-state-metrics\")].resources.requests}", "-n", "openshift-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get kube-state-metrics container resources.requests setting")
o.Expect(result).To(o.ContainSubstring(`"cpu":"3m","memory":"100Mi"`))
result, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment/kube-state-metrics", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"kube-state-metrics\")].resources.limits}", "-n", "openshift-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get kube-state-metrics container resources.limits setting")
o.Expect(result).To(o.ContainSubstring(`"cpu":"10m","memory":"200Mi"`))
exutil.By("check the resources.requests and resources.limits take effect for openshift-state-metrics")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=kube_pod_container_resource_limits{container="openshift-state-metrics",namespace="openshift-monitoring"}'`, token, `"pod":"openshift-state-metrics-`, 3*uwmLoadTime)
result, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment/openshift-state-metrics", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"openshift-state-metrics\")].resources.requests}", "-n", "openshift-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get openshift-state-metrics container resources.requests setting")
o.Expect(result).To(o.ContainSubstring(`"cpu":"2m","memory":"40Mi"`))
result, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment/openshift-state-metrics", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"openshift-state-metrics\")].resources.limits}", "-n", "openshift-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get openshift-state-metrics container resources.limits setting")
o.Expect(result).To(o.ContainSubstring(`"cpu":"20m","memory":"100Mi"`))
exutil.By("check the resources.requests and resources.limits take effect for metrics-server")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=kube_pod_container_resource_limits{container="metrics-server",namespace="openshift-monitoring"}'`, token, `"pod":"metrics-server-`, 3*uwmLoadTime)
result, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment/metrics-server", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"metrics-server\")].resources.requests}", "-n", "openshift-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get metrics-server container resources.requests setting")
o.Expect(result).To(o.ContainSubstring(`"cpu":"2m","memory":"80Mi"`))
result, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment/metrics-server", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"metrics-server\")].resources.limits}", "-n", "openshift-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get metrics-server container resources.limits setting")
o.Expect(result).To(o.ContainSubstring(`"cpu":"10m","memory":"100Mi"`))
exutil.By("check the resources.requests and resources.limits take effect for prometheus-operator")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=kube_pod_container_resource_limits{container="prometheus-operator",namespace="openshift-monitoring"}'`, token, `"pod":"prometheus-operator-`, 3*uwmLoadTime)
result, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment/prometheus-operator", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"prometheus-operator\")].resources.requests}", "-n", "openshift-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get prometheus-operator container resources.requests setting")
o.Expect(result).To(o.ContainSubstring(`"cpu":"10m","memory":"200Mi"`))
result, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment/prometheus-operator", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"prometheus-operator\")].resources.limits}", "-n", "openshift-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get prometheus-operator container resources.limits setting")
o.Expect(result).To(o.ContainSubstring(`"cpu":"20m","memory":"300Mi"`))
exutil.By("check the resources.requests and resources.limits take effect for prometheus-operator-admission-webhook")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=kube_pod_container_resource_limits{container="prometheus-operator-admission-webhook",namespace="openshift-monitoring"}'`, token, `"pod":"prometheus-operator-admission-webhook-`, 3*uwmLoadTime)
result, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment/prometheus-operator-admission-webhook", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"prometheus-operator-admission-webhook\")].resources.requests}", "-n", "openshift-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get prometheus-operator-admission-webhook container resources.requests setting")
o.Expect(result).To(o.ContainSubstring(`"cpu":"10m","memory":"50Mi"`))
result, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment/prometheus-operator-admission-webhook", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"prometheus-operator-admission-webhook\")].resources.limits}", "-n", "openshift-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get prometheus-operator-admission-webhook container resources.limits setting")
o.Expect(result).To(o.ContainSubstring(`"cpu":"20m","memory":"100Mi"`))
exutil.By("check the resources.requests and resources.limits take effect for telemeter-client")
telemeterPod, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-l", "app.kubernetes.io/name=telemeter-client", "-n", "openshift-monitoring").Output()
if strings.Contains(telemeterPod, "telemeter-client") {
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=kube_pod_container_resource_limits{container="telemeter-client",namespace="openshift-monitoring"}'`, token, `"pod":"telemeter-client-`, 3*uwmLoadTime)
result, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment/telemeter-client", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"telemeter-client\")].resources.requests}", "-n", "openshift-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get telemeter-client container resources.requests setting")
o.Expect(result).To(o.ContainSubstring(`"cpu":"2m","memory":"50Mi"`))
result, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment/telemeter-client", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"telemeter-client\")].resources.limits}", "-n", "openshift-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get telemeter-client container resources.limits setting")
o.Expect(result).To(o.ContainSubstring(`"cpu":"10m","memory":"100Mi"`))
}
createResourceFromYaml(oc, "openshift-user-workload-monitoring", uwmResources)
exutil.AssertAllPodsToBeReady(oc, "openshift-user-workload-monitoring")
exutil.By("check the resources.requests and resources.limits for uwm prometheus-operator")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=kube_pod_container_resource_limits{container="prometheus-operator",namespace="openshift-user-workload-monitoring"}'`, token, `"pod":"prometheus-operator-`, 3*uwmLoadTime)
result, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment/prometheus-operator", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"prometheus-operator\")].resources.requests}", "-n", "openshift-user-workload-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get UWM prometheus-operator container resources.requests setting")
o.Expect(result).To(o.ContainSubstring(`"cpu":"2m","memory":"20Mi"`))
result, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment/prometheus-operator", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"prometheus-operator\")].resources.limits}", "-n", "openshift-user-workload-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get UWM prometheus-operator container resources.limits setting")
o.Expect(result).To(o.ContainSubstring(`"cpu":"10m","memory":"100Mi"`))
})
//author: [email protected]
g.It("Author:tagao-High-67503-check On/Off switch of processes Collector in Node Exporter [Serial]", func() {
var (
enableProcesses = filepath.Join(monitoringBaseDir, "enableProcesses.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("check processes Collector is disabled by default")
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("--no-collector.processes"))
exutil.By("check processes metrics in prometheus k8s pod, should not have related metrics")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="processes"}'`, token, `"result":[]`, uwmLoadTime)
exutil.By("enable processes in CMO config")
createResourceFromYaml(oc, "openshift-monitoring", enableProcesses)
exutil.By("check processes metrics in prometheus k8s pod again")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="processes"}'`, token, `"collector":"processes"`, 3*uwmLoadTime)
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_processes_max_processes'`, token, `"__name__":"node_processes_max_processes"`, 3*uwmLoadTime)
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_processes_pids'`, token, `"__name__":"node_processes_pids"`, 3*uwmLoadTime)
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_processes_state'`, token, `"__name__":"node_processes_state"`, 3*uwmLoadTime)
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_processes_threads'`, token, `"__name__":"node_processes_threads"`, 3*uwmLoadTime)
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_processes_threads_state'`, token, `"__name__":"node_processes_threads_state"`, 3*uwmLoadTime)
exutil.By("check processes in daemonset")
output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("--collector.processes"))
})
// author: [email protected]
g.It("Author:tagao-Medium-73009-CMO is correctly forwarding current proxy config to the prometheus operator in remote write configs [Serial]", func() {
var (
remotewriteCM = filepath.Join(monitoringBaseDir, "example-remotewrite-cm.yaml")
)
exutil.By("check cluster proxy")
checkProxy, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("proxy", "cluster", "-ojsonpath={.spec}").Output()
if checkProxy == "{}" || !strings.Contains(checkProxy, `http`) {
g.Skip("This case should execute on a proxy cluster!")
}
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("Create example remotewrite cm under openshift-monitoring")
createResourceFromYaml(oc, "openshift-monitoring", remotewriteCM)
exutil.By("get http and https proxy URL")
httpProxy, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("proxy", "cluster", "-ojsonpath={.spec.httpProxy}").Output()
httpsProxy, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("proxy", "cluster", "-ojsonpath={.spec.httpsProxy}").Output()
e2e.Logf("httpProxy:\n%s", httpProxy)
e2e.Logf("httpsProxy:\n%s", httpsProxy)
exutil.By("check prometheus remoteWrite configs applied")
cmd := "-ojsonpath={.spec.remoteWrite[]}"
checkValue := `"url":"https://test.remotewrite.com/api/write"`
checkYamlconfig(oc, "openshift-monitoring", "prometheuses", "k8s", cmd, checkValue, true)
proxyUrl, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("prometheuses", "k8s", "-ojsonpath={.spec.remoteWrite[].proxyUrl}", "-n", "openshift-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("proxyUrl:\n%s", proxyUrl)
exutil.By("check remoteWrite proxyUrl should be same as cluster proxy")
if strings.Contains(proxyUrl, httpsProxy) {
o.Expect(proxyUrl).NotTo(o.Equal(""))
o.Expect(proxyUrl).To(o.Equal(httpsProxy))
} else {
o.Expect(proxyUrl).NotTo(o.Equal(""))
o.Expect(proxyUrl).To(o.Equal(httpProxy))
}
})
// author: [email protected]
g.It("Author:tagao-Medium-73834-trigger PrometheusOperatorRejectedResources alert [Serial]", func() {
var (
PrometheusOperatorRejectedResources = filepath.Join(monitoringBaseDir, "PrometheusOperatorRejectedResources.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of the case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("check the alert exist")
cmd := "-ojsonpath={.spec.groups[].rules[?(@.alert==\"PrometheusOperatorRejectedResources\")]}"
checkYamlconfig(oc, "openshift-monitoring", "prometheusrules", "prometheus-operator-rules", cmd, "PrometheusOperatorRejectedResources", true)
exutil.By("trigger PrometheusOperatorRejectedResources alert")
oc.SetupProject()
ns := oc.Namespace()
createResourceFromYaml(oc, ns, PrometheusOperatorRejectedResources)
exutil.By("check alert metrics")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertname="PrometheusOperatorRejectedResources"}'`, token, `PrometheusOperatorRejectedResources`, 3*uwmLoadTime)
})
// author: [email protected]
g.It("Author:tagao-Medium-73805-trigger PrometheusRuleFailures alert [Serial]", func() {
var (
PrometheusRuleFailures = filepath.Join(monitoringBaseDir, "PrometheusRuleFailures.yaml")
)
exutil.By("delete uwm-config/cm-config and test alert at the end of the case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("PrometheusRule", "example-alert", "-n", "openshift-monitoring", "--ignore-not-found").Execute()
exutil.By("check the alert exist")
cmd := "-ojsonpath={.spec.groups[].rules[?(@.alert==\"PrometheusRuleFailures\")]}"
checkYamlconfig(oc, "openshift-monitoring", "prometheusrules", "prometheus-k8s-prometheus-rules", cmd, "PrometheusRuleFailures", true)
exutil.By("trigger PrometheusRuleFailures alert")
createResourceFromYaml(oc, "openshift-monitoring", PrometheusRuleFailures)
exutil.By("check alert metrics")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=sum(irate(container_network_receive_bytes_total{pod!=""}[5m])) BY (pod, interface) + on(pod, interface) group_left(network_name) pod_network_name_info'`, token, `"error":"found duplicate series for the match group`, uwmLoadTime)
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertname="PrometheusRuleFailures"}'`, token, `PrometheusRuleFailures`, 3*uwmLoadTime)
})
// author: [email protected]
g.It("Author:tagao-Medium-73804-trigger TargetDown alert [Serial]", func() {
var (
exampleApp = filepath.Join(monitoringBaseDir, "example-app.yaml")
)
exutil.By("delete uwm-config/cm-config and example-app at the end of the case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("deployment/prometheus-example-app", "service/prometheus-example-app", "servicemonitor/prometheus-example-monitor", "-n", "openshift-monitoring", "--ignore-not-found").Execute()
exutil.By("check the alert exist")
cmd := "-ojsonpath={.spec.groups[].rules[?(@.alert==\"TargetDown\")]}"
checkYamlconfig(oc, "openshift-monitoring", "prometheusrules", "cluster-monitoring-operator-prometheus-rules", cmd, "TargetDown", true)
exutil.By("trigger TargetDown alert")
createResourceFromYaml(oc, "openshift-monitoring", exampleApp)
//% oc patch ServiceMonitor/prometheus-example-monitor -n openshift-monitoring --type json -p '[{"op": "add", "path": "/spec/endpoints/0/scheme", "value": "https"}]'
patchConfig := `[{"op": "add", "path": "/spec/endpoints/0/scheme", "value":"https"}]`
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("servicemonitor", "prometheus-example-monitor", "-p", patchConfig, "--type=json", "-n", "openshift-monitoring").Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By("check alert metrics")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertname="TargetDown",job="prometheus-example-app"}'`, token, `"alertname":"TargetDown"`, 3*uwmLoadTime)
})
// author: [email protected]
g.It("Author:tagao-Medium-74734-Alert for broken Prometheus Kube Service Discovery", func() {
var (
exampleApp = filepath.Join(monitoringBaseDir, "example-app.yaml")
)
exutil.By("confirm the alert existed")
// % oc -n openshift-monitoring get prometheusrules prometheus-k8s-prometheus-rules -ojsonpath='{.spec.groups[].rules[?(@.alert=="PrometheusKubernetesListWatchFailures")]}' |jq
cmd := "-ojsonpath={.spec.groups[].rules[?(@.alert==\"PrometheusKubernetesListWatchFailures\")]}"
checkYamlconfig(oc, "openshift-monitoring", "prometheusrules", "prometheus-k8s-prometheus-rules", cmd, `"alert":"PrometheusKubernetesListWatchFailures"`, true)
exutil.By("create a namespace and deploy example-app")
oc.SetupProject()
ns := oc.Namespace()
createResourceFromYaml(oc, ns, exampleApp)
exutil.By("add label to the namespace")
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("namespace", ns, "openshift.io/cluster-monitoring-").Execute()
err := oc.AsAdmin().WithoutNamespace().Run("label").Args("namespace", ns, "openshift.io/cluster-monitoring=true").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
label, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("namespace", ns, `-ojsonpath={.metadata.labels}`).Output()
e2e.Logf("test namespace labels: \n%v", label)
o.Expect(label).To(o.ContainSubstring(`openshift.io/cluster-monitoring":"true`))
exutil.By("confirm prometheus pod is ready")
assertPodToBeReady(oc, "prometheus-k8s-0", "openshift-monitoring")
exutil.By("confirm thanos-query pod is ready")
//% oc get pod -n openshift-monitoring -l app.kubernetes.io/name=thanos-query
waitErr := oc.AsAdmin().WithoutNamespace().Run("wait").Args("pod", "-l", "app.kubernetes.io/name=thanos-query", "-n", "openshift-monitoring", "--for=condition=Ready", "--timeout=3m").Execute()
o.Expect(waitErr).NotTo(o.HaveOccurred())
// debug log
MONpod, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-monitoring").Output()
e2e.Logf("the MON pods condition: %s", MONpod)
exutil.By("check the alert is triggered")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertname="PrometheusKubernetesListWatchFailures"}'`, token, `"alertname":"PrometheusKubernetesListWatchFailures"`, 3*uwmLoadTime)
exutil.By("check logs in prometheus pod")
checkLogWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/name=prometheus", "prometheus", `cannot list resource \"pods\" in API group \"\" in the namespace \"`+ns+`\"`, true)
})
// author: [email protected]
g.It("Author:tagao-Medium-74311-trigger PrometheusRemoteWriteBehind alert [Serial]", func() {
var (
PrometheusRemoteWriteBehind = filepath.Join(monitoringBaseDir, "PrometheusRemoteWriteBehind.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of the case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("create fake remoteWrite")
createResourceFromYaml(oc, "openshift-monitoring", PrometheusRemoteWriteBehind)
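// the PrometheusRemoteWriteBehind.yaml fixture (assumed) points remoteWrite at a non-resolvable endpoint, so Prometheus should fall behind and log "no such host"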
exutil.By("check the alert exist")
cmd := "-ojsonpath={.spec.groups[].rules[?(@.alert==\"PrometheusRemoteWriteBehind\")]}"
checkYamlconfig(oc, "openshift-monitoring", "prometheusrules", "prometheus-k8s-prometheus-rules", cmd, "PrometheusRemoteWriteBehind", true)
exutil.By("check logs in pod")
checkLogWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/name=prometheus", "prometheus", "no such host", true)
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("check alert triggered")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertname="PrometheusRemoteWriteBehind"}'`, token, `"alertname":"PrometheusRemoteWriteBehind"`, 2*uwmLoadTime)
})
// author: [email protected]
g.It("Author:tagao-Medium-76282-monitoring-plugin should reload cert/key files dynamically [Serial]", func() {
exutil.By("delete uwm-config/cm-config at the end of the case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("check openshift-monitoring/monitoring-plugin-cert secret exist")
//% oc -n openshift-monitoring get secret monitoring-plugin-cert -ojsonpath='{.data}'
cmd := "-ojsonpath={.data}"
checkYamlconfig(oc, "openshift-monitoring", "secret", "monitoring-plugin-cert", cmd, `tls.crt`, true)
checkYamlconfig(oc, "openshift-monitoring", "secret", "monitoring-plugin-cert", cmd, `tls.key`, true)
secretBefore, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", "monitoring-plugin-cert", "-ojsonpath={.data}", "-n", "openshift-monitoring").Output()
exutil.By("delete openshift-monitoring/monitoring-plugin-cert secret")
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("secret", "monitoring-plugin-cert", "-n", "openshift-monitoring").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
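// the cluster-monitoring-operator is expected to re-create the secret with freshly generated cert/key material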
exutil.By("check the secret re-created")
checkYamlconfig(oc, "openshift-monitoring", "secret", "monitoring-plugin-cert", cmd, `tls.crt`, true)
checkYamlconfig(oc, "openshift-monitoring", "secret", "monitoring-plugin-cert", cmd, `tls.key`, true)
secretAfter, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", "monitoring-plugin-cert", "-ojsonpath={.data}", "-n", "openshift-monitoring").Output()
exutil.By("check the secret have a new hash")
if secretBefore == secretAfter {
e2e.Failf("secret not changed!")
}
})
// author: [email protected]
g.It("Author:tagao-Medium-73291-Graduate MetricsServer FeatureGate to GA [Serial]", func() {
var (
metrics_server_test = filepath.Join(monitoringBaseDir, "metrics_server_test.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of the case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("check metrics-server pods are ready")
getReadyPodsWithLabels(oc, "openshift-monitoring", "app.kubernetes.io/component=metrics-server")
exutil.By("label master node with metrics-server label")
nodeList, err := getNodesWithLabel(oc, "node-role.kubernetes.io/master")
o.Expect(err).NotTo(o.HaveOccurred())
for _, node := range nodeList {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", node, "metricsserver-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", node, "metricsserver=deploy").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("schedule metrics-server pods to master node")
createResourceFromYaml(oc, "openshift-monitoring", metrics_server_test)
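// the metrics_server_test.yaml fixture (assumed) adds a nodeSelector of metricsserver=deploy plus topologySpreadConstraints, so the metrics-server pods should be rescheduled onto the labeled master nodes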
podCheck := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 300*time.Second, true, func(context.Context) (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-monitoring", "-l", "app.kubernetes.io/component=metrics-server").Output()
if err != nil || strings.Contains(output, "Terminating") {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(podCheck, "metrics-server pods did not restart!")
exutil.By("confirm metrics-server pods are scheduled to master nodes, this step may take a few minutes")
getReadyPodsWithLabels(oc, "openshift-monitoring", "app.kubernetes.io/component=metrics-server")
podNames, err := getAllRunningPodsWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/component=metrics-server")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range podNames {
nodeName, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", pod, "-ojsonpath={.spec.nodeName}", "-n", "openshift-monitoring").Output()
nodeCheck, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeName, "-ojsonpath={.metadata.labels}").Output()
o.Expect(strings.Contains(string(nodeCheck), "node-role.kubernetes.io/master")).Should(o.BeTrue())
}
exutil.By("check config applied")
for _, pod := range podNames {
// % oc -n openshift-monitoring get pod metrics-server-7778dbf79b-8frpq -o jsonpath='{.spec.nodeSelector}' | jq
cmd := "-ojsonpath={.spec.nodeSelector}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, `"metricsserver":"deploy"`, true)
// % oc -n openshift-monitoring get pod metrics-server-7778dbf79b-8frpq -o jsonpath='{.spec.topologySpreadConstraints}' | jq
cmd = "-ojsonpath={.spec.topologySpreadConstraints}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, `"app.kubernetes.io/name":"metrics-server"`, true)
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, `"maxSkew":2`, true)
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, `"topologyKey":"metricsserver"`, true)
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, `"whenUnsatisfiable":"DoNotSchedule"`, true)
// % oc get pod -n openshift-monitoring metrics-server-c8cbfd6ff-pnk2z -o go-template='{{range.spec.containers}}{{"Container Name: "}}{{.name}}{{"\r\nresources: "}}{{.resources}}{{"\n"}}{{end}}'
cmd = `-ogo-template={{range.spec.containers}}{{"Container Name: "}}{{.name}}{{"\r\nresources: "}}{{.resources}}{{"\n"}}{{end}}`
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, `resources: map[limits:map[cpu:50m memory:500Mi] requests:map[cpu:10m memory:50Mi]]`, true)
}
})
// author: [email protected]
g.It("Author:tagao-Medium-72776-Enable audit logging to Metrics Server - invalid value [Serial]", func() {
var (
invalid_value_audit_profile = filepath.Join(monitoringBaseDir, "invalid_value_audit_profile.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of the case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("check default audit level is Metadata")
//% oc -n openshift-monitoring get deploy metrics-server -ojsonpath='{.spec.template.spec.containers[?(@.name=="metrics-server")].args}' | jq
cmd := `-ojsonpath={.spec.template.spec.containers[?(@.name=="metrics-server")].args}`
checkYamlconfig(oc, "openshift-monitoring", "deploy", "metrics-server", cmd, `"--audit-policy-file=/etc/audit/metadata-profile.yaml"`, true)
exutil.By("set invalid value for audit profile")
createResourceFromYaml(oc, "openshift-monitoring", invalid_value_audit_profile)
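// CMO is expected to reject the invalid value and keep the default Metadata audit policy, logging an error about the adapter audit profile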
exutil.By("check failed log in CMO")
checkLogWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/name=cluster-monitoring-operator", "cluster-monitoring-operator", `adapter audit profile: metadata`, true)
})
// author: [email protected]
g.It("Author:tagao-Medium-72707-Enable audit logging to Metrics Server [Serial]", func() {
var (
valid_value_audit_profile = filepath.Join(monitoringBaseDir, "valid_value_audit_profile.yaml")
)
exutil.By("delete uwm-config/cm-config at the end of the case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("check audit file path")
//% oc -n openshift-monitoring get deploy metrics-server -ojsonpath='{.spec.template.spec.containers[?(@.name=="metrics-server")].args}' | jq
cmd := `-ojsonpath={.spec.template.spec.containers[?(@.name=="metrics-server")].args}`
checkYamlconfig(oc, "openshift-monitoring", "deploy", "metrics-server", cmd, `"--audit-policy-file=/etc/audit/metadata-profile.yaml"`, true)
exutil.By("check the audit log")
//% oc -n openshift-monitoring exec -c metrics-server metrics-server-777f5464ff-5fdvh -- cat /var/log/metrics-server/audit.log
getReadyPodsWithLabels(oc, "openshift-monitoring", "app.kubernetes.io/component=metrics-server")
podNames, err := getAllRunningPodsWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/component=metrics-server")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range podNames {
cmd := "cat /var/log/metrics-server/audit.log"
checkConfigInsidePod(oc, "openshift-monitoring", "metrics-server", pod, cmd, `"level":"Metadata"`, true)
}
exutil.By("set audit profile as Request")
createResourceFromYaml(oc, "openshift-monitoring", valid_value_audit_profile)
exutil.By("check the deploy config applied")
//oc -n openshift-monitoring get deploy metrics-server -ojsonpath='{.spec.template.spec.containers[?(@.name=="metrics-server")].args}' | jq
cmd = `-ojsonpath={.spec.template.spec.containers[?(@.name=="metrics-server")].args}`
checkYamlconfig(oc, "openshift-monitoring", "deploy", "metrics-server", cmd, `"--audit-policy-file=/etc/audit/request-profile.yaml"`, true)
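// once the deployment args point at the Request profile, new metrics-server pods should roll out with the stricter audit policy mounted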
exutil.By("check the policy reflect into pod")
getReadyPodsWithLabels(oc, "openshift-monitoring", "app.kubernetes.io/component=metrics-server")
podNames, err = getAllRunningPodsWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/component=metrics-server")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range podNames {
//oc -n openshift-monitoring exec -c metrics-server metrics-server-85db9c79c8-sljdb -- cat /etc/audit/request-profile.yaml
cmd := "cat /etc/audit/request-profile.yaml"
checkConfigInsidePod(oc, "openshift-monitoring", "metrics-server", pod, cmd, `"name": "Request"`, true)
checkConfigInsidePod(oc, "openshift-monitoring", "metrics-server", pod, cmd, `"level": "Request"`, true)
//oc -n openshift-monitoring exec -c metrics-server metrics-server-85db9c79c8-sljdb -- cat /var/log/metrics-server/audit.log
cmd = "cat /var/log/metrics-server/audit.log"
checkConfigInsidePod(oc, "openshift-monitoring", "metrics-server", pod, cmd, `level":"Request"`, true)
}
})
// author: [email protected]
g.It("Author:hongyli-Critical-44032-Restore cluster monitoring stack default configuration [Serial]", func() {
exutil.By("Delete config map cluster-monitoring-config")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("Delete config map user-workload-monitoring-config")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
exutil.By("Delete alertmanager under openshift-user-workload-monitoring")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("alertmanager", "test-alertmanager", "-n", "openshift-user-workload-monitoring", "--ignore-not-found").Execute()
})
})
|
package monitoring
| ||||
test case
|
openshift/openshift-tests-private
|
3caf92e6-c518-47c7-bbb0-9ffcfcc53797
|
Author:hongyli-High-49073-Retention size settings for platform
|
['"time"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:hongyli-High-49073-Retention size settings for platform", func() {
checkRetention(oc, "openshift-monitoring", "prometheus-k8s", "storage.tsdb.retention.size=10GiB", platformLoadTime)
checkRetention(oc, "openshift-monitoring", "prometheus-k8s", "storage.tsdb.retention.time=45d", 20)
})
| |||||
test case
|
openshift/openshift-tests-private
|
b51cd12a-f50f-40b2-9c0a-38f85cf9cde7
|
Author:hongyli-High-49514-federate service endpoint and route of platform Prometheus
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:hongyli-High-49514-federate service endpoint and route of platform Prometheus", func() {
var err error
exutil.By("Bind cluster-monitoring-view cluster role to current user")
clusterRoleBindingName := "clusterMonitoringViewFederate"
defer deleteClusterRoleBinding(oc, clusterRoleBindingName)
clusterRoleBinding, err := bindClusterRoleToUser(oc, "cluster-monitoring-view", oc.Username(), clusterRoleBindingName)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Created: %v %v", "ClusterRoleBinding", clusterRoleBinding.Name)
exutil.By("Get token of current user")
token := oc.UserConfig().BearerToken
exutil.By("check federate endpoint service")
checkMetric(oc, "https://prometheus-k8s.openshift-monitoring.svc:9091/federate --data-urlencode 'match[]=prometheus_build_info'", token, "prometheus_build_info", 3*platformLoadTime)
exutil.By("check federate route")
checkRoute(oc, "openshift-monitoring", "prometheus-k8s-federate", token, "match[]=prometheus_build_info", "prometheus_build_info", 3*platformLoadTime)
})
| ||||||
test case
|
openshift/openshift-tests-private
|
d4577c01-c699-463a-b1f9-05c27451862d
|
Author:juzhao-LEVEL0-Medium-49172-Enable validating webhook for AlertmanagerConfig custom resource
|
['"context"', '"path/filepath"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:juzhao-LEVEL0-Medium-49172-Enable validating webhook for AlertmanagerConfig customer resource", func() {
var (
err error
output string
namespace string
invalidAlertmanagerConfig = filepath.Join(monitoringBaseDir, "invalid-alertmanagerconfig.yaml")
validAlertmanagerConfig = filepath.Join(monitoringBaseDir, "valid-alertmanagerconfig.yaml")
)
exutil.By("Get prometheus-operator-admission-webhook deployment")
err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment", "prometheus-operator-admission-webhook", "-n", "openshift-monitoring").Execute()
if err != nil {
e2e.Logf("Unable to get deployment prometheus-operator-admission-webhook.")
}
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
namespace = oc.Namespace()
exutil.By("confirm alertmanagerconfigs CRD exists")
err = wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 180*time.Second, false, func(context.Context) (bool, error) {
alertmanagerconfigs, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("customresourcedefinitions", "alertmanagerconfigs.monitoring.coreos.com").Output()
if err != nil || strings.Contains(alertmanagerconfigs, "not found") {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, "alertmanagerconfigs CRD does not exist")
exutil.By("Create invalid AlertmanagerConfig, should throw out error")
output, err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", invalidAlertmanagerConfig, "-n", namespace).Output()
o.Expect(err).To(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("The AlertmanagerConfig \"invalid-test-config\" is invalid"))
exutil.By("Create valid AlertmanagerConfig, should not have error")
output, err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", validAlertmanagerConfig, "-n", namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("valid-test-config created"))
})
| |||||
test case
|
openshift/openshift-tests-private
|
79bb93ce-1311-48c1-b01b-348f85590442
|
Author:tagao-Medium-42800-Allow configuration of the log level for Alertmanager in the CMO configmap
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Medium-42800-Allow configuration of the log level for Alertmanager in the CMO configmap", func() {
exutil.By("Check alertmanager container logs")
exutil.WaitAndGetSpecificPodLogs(oc, "openshift-monitoring", "alertmanager", "alertmanager-main-0", "level=debug")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
4bf0d5a2-7a73-485c-b010-010b8619d89a
|
Author:juzhao-Medium-43748-Ensure label namespace exists on all alerts
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:juzhao-Medium-43748-Ensure label namespace exists on all alerts", func() {
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("check alerts, should have label namespace exists on all alerts")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertname="Watchdog"}'`, token, `"namespace":"openshift-monitoring"`, 2*platformLoadTime)
})
| ||||||
test case
|
openshift/openshift-tests-private
|
93202ba8-f68f-4d2a-991a-39288d93a6cb
|
Author:tagao-Medium-47307-Add external label of origin to platform alerts
|
['g "github.com/onsi/ginkgo/v2"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Medium-47307-Add external label of origin to platform alerts", func() {
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("check alerts, could see the `openshift_io_alert_source` field for in-cluster alerts")
checkMetric(oc, "https://alertmanager-main.openshift-monitoring.svc:9094/api/v2/alerts", token, `"openshift_io_alert_source":"platform"`, 2*platformLoadTime)
})
| |||||
test case
|
openshift/openshift-tests-private
|
7d399180-4d1f-46ee-a2aa-25a89fabc0fc
|
Author:tagao-Medium-45163-Show labels for pods/nodes/namespaces/PV/PVC/PDB in metrics
|
['"path/filepath"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Medium-45163-Show labels for pods/nodes/namespaces/PV/PVC/PDB in metrics", func() {
var (
ns string
helloPodPvc = filepath.Join(monitoringBaseDir, "helloPodPvc.yaml")
)
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("check if the cluster have default storage class")
checkSC, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("sc", "--no-headers").Output()
e2e.Logf("storage class: %s", checkSC)
hasSC := false
if strings.Contains(checkSC, "default") {
hasSC = true
exutil.By("create project ns then attach pv/pvc")
oc.SetupProject()
ns = oc.Namespace()
createResourceFromYaml(oc, ns, helloPodPvc)
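// the helloPodPvc fixture (assumed) creates a pod with a PVC bound through the default storage class, which is what later makes kube_persistentvolume_labels and kube_persistentvolumeclaim_labels return data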
}
exutil.By("Check labels for pod")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=kube_pod_labels{pod="alertmanager-main-0"}'`, token, `"label_statefulset_kubernetes_io_pod_name"`, uwmLoadTime)
exutil.By("Check labels for node")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=kube_node_labels'`, token, `"label_kubernetes_io_hostname"`, uwmLoadTime)
exutil.By("Check labels for namespace")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=kube_namespace_labels{namespace="openshift-monitoring"}'`, token, `"label_kubernetes_io_metadata_name"`, uwmLoadTime)
exutil.By("Check labels for PDB")
checkPDB, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pdb", "thanos-querier-pdb", "-n", "openshift-monitoring").Output()
if !strings.Contains(checkPDB, `"thanos-querier-pdb" not found`) {
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=kube_poddisruptionbudget_labels{poddisruptionbudget="thanos-querier-pdb"}'`, token, `"label_app_kubernetes_io_name"`, uwmLoadTime)
}
exutil.By("Check labels for PV/PVC if need")
if hasSC {
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=kube_persistentvolume_labels'`, token, `"persistentvolume"`, 2*uwmLoadTime)
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=kube_persistentvolumeclaim_labels'`, token, `"persistentvolumeclaim"`, 2*uwmLoadTime)
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
50d2900f-0989-4722-9e0a-be033938abbe
|
Author:tagao-Medium-48432-Allow OpenShift users to configure request logging for Thanos Querier query endpoint
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Medium-48432-Allow OpenShift users to configure request logging for Thanos Querier query endpoint", func() {
exutil.By("check thanos-querier pods are normal and able to see the request.logging-config setting")
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
cmd := "-ojsonpath={.spec.template.spec.containers[?(@.name==\"thanos-query\")].args}"
checkYamlconfig(oc, "openshift-monitoring", "deploy", "thanos-querier", cmd, "request.logging-config", true)
// thanos-querier pod names will change when the configmap is modified; pods may not have restarted yet during the first check
exutil.By("double confirm thanos-querier pods are ready")
podList, err := exutil.GetAllPodsWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/instance=thanos-querier")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range podList {
exutil.AssertPodToBeReady(oc, pod, "openshift-monitoring")
}
exutil.By("query with thanos-querier svc")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertname="Watchdog"}'`, token, `Watchdog`, 3*uwmLoadTime)
exutil.By("check from thanos-querier logs")
//oc -n openshift-monitoring logs -l app.kubernetes.io/instance=thanos-querier -c thanos-query --tail=-1
checkLogWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/instance=thanos-querier", "thanos-query", `Watchdog`, true)
})
| ||||||
test case
|
openshift/openshift-tests-private
|
b1fb5091-5e3f-40ff-a907-c466e7d14aa8
|
Author:juzhao-Low-43038-Should not have error for loading OpenAPI spec for v1beta1.metrics.k8s.io
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:juzhao-Low-43038-Should not have error for loading OpenAPI spec for v1beta1.metrics.k8s.io", func() {
var (
searchString string
result string
)
searchString = "loading OpenAPI spec for \"v1beta1.metrics.k8s.io\" failed with:"
podList, err := exutil.GetAllPodsWithLabel(oc, "openshift-kube-apiserver", "app=openshift-kube-apiserver")
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("kube-apiserver Pods: %v", podList)
exutil.By("check the kube-apiserver logs, should not have error for v1beta1.metrics.k8s.io")
for _, pod := range podList {
exutil.AssertPodToBeReady(oc, pod, "openshift-kube-apiserver")
result, _ = exutil.GetSpecificPodLogs(oc, "openshift-kube-apiserver", "kube-apiserver", pod, searchString)
e2e.Logf("output result in logs: %v", result)
o.Expect(result).To(o.BeEmpty(), "found unexpected error logs for v1beta1.metrics.k8s.io")
}
})
| ||||||
test case
|
openshift/openshift-tests-private
|
8b98814e-f0d1-4d8b-8e47-114fb3aa5141
|
Author:tagao-Low-55670-Prometheus should not collect error messages for completed pods [Serial]
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Low-55670-Prometheus should not collecting error messages for completed pods [Serial]", func() {
exutil.By("delete user-workload-monitoring-config/cluster-monitoring-config configmap at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("check pod conditioning in openshift-kube-scheduler")
podStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-kube-scheduler").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("kube-scheduler Pods:\n%s", podStatus)
o.Expect(podStatus).To(o.ContainSubstring("Completed"))
exutil.By("check metrics-server pod logs")
exutil.AssertAllPodsToBeReady(oc, "openshift-monitoring")
output, logsErr := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-l", "app.kubernetes.io/name=metrics-server", "-c", "metrics-server", "--tail=-1", "-n", "openshift-monitoring").Output()
o.Expect(logsErr).NotTo(o.HaveOccurred())
if strings.Contains(output, "unable to fetch CPU metrics for pod openshift-kube-scheduler/") {
e2e.Logf("output result in logs:\n%s", output)
e2e.Failf("found unexpected logs")
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
7070e244-914c-4fa8-9861-733f71dbb328
|
Author:tagao-LEVEL0-Medium-55767-Missing metrics in kube-state-metrics
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-LEVEL0-Medium-55767-Missing metrics in kube-state-metrics", func() {
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("check kube-state-metrics metrics, the following metrics should be visible")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/label/__name__/values`, token, `"kube_pod_container_status_terminated_reason"`, uwmLoadTime)
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/label/__name__/values`, token, `"kube_pod_init_container_status_terminated_reason"`, uwmLoadTime)
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/label/__name__/values`, token, `"kube_pod_status_scheduled_time"`, uwmLoadTime)
})
| ||||||
test case
|
openshift/openshift-tests-private
|
1af86438-9eae-406a-8b19-f0ee76c6c2f3
|
Author:tagao-High-56168-PreChkUpgrade-NonPreRelease-Prometheus never sees endpoint propagation of a deleted pod
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-High-56168-PreChkUpgrade-NonPreRelease-Prometheus never sees endpoint propagation of a deleted pod", func() {
var (
ns = "56168-upgrade-ns"
exampleApp = filepath.Join(monitoringBaseDir, "example-app.yaml")
roleBinding = filepath.Join(monitoringBaseDir, "sa-prometheus-k8s-access.yaml")
)
exutil.By("Create example app")
oc.AsAdmin().WithoutNamespace().Run("create").Args("namespace", ns).Execute()
createResourceFromYaml(oc, ns, exampleApp)
exutil.AssertAllPodsToBeReady(oc, ns)
exutil.By("add role and role binding for example app")
createResourceFromYaml(oc, ns, roleBinding)
exutil.By("label namespace")
oc.AsAdmin().WithoutNamespace().Run("label").Args("namespace", ns, "openshift.io/cluster-monitoring=true").Execute()
exutil.By("check target is up")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/targets`, token, "up", 2*uwmLoadTime)
})
| |||||
test case
|
openshift/openshift-tests-private
|
854e691d-6f79-4b3b-9e5e-e716c9516fac
|
Author:tagao-High-56168-PstChkUpgrade-NonPreRelease-Prometheus never sees endpoint propagation of a deleted pod
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-High-56168-PstChkUpgrade-NonPreRelease-Prometheus never sees endpoint propagation of a deleted pod", func() {
exutil.By("get the ns name in PreChkUpgrade")
ns := "56168-upgrade-ns"
exutil.By("delete related resource at the end of case")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("project", ns).Execute()
exutil.By("delete example app deployment")
deleteApp, _ := oc.AsAdmin().WithoutNamespace().Run("delete").Args("deploy", "prometheus-example-app", "-n", ns).Output()
o.Expect(deleteApp).To(o.ContainSubstring(`"prometheus-example-app" deleted`))
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("check metric up==0 under the test project, return null")
checkMetric(oc, "https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=up{namespace=\"56168-upgrade-ns\"}==0'", token, `"result":[]`, 2*uwmLoadTime)
exutil.By("check no alert 'TargetDown'")
checkAlertNotExist(oc, "https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{namespace=\"56168-upgrade-ns\"}'", token, "TargetDown", uwmLoadTime)
})
| ||||||
test case
|
openshift/openshift-tests-private
|
f784e2a2-0251-4e83-910c-daad99554f9b
|
Author:tagao-LEVEL0-Medium-57254-oc adm top node/pod output should not give negative numbers
|
['"os/exec"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-LEVEL0-Medium-57254-oc adm top node/pod output should not give negative numbers", func() {
exutil.By("check on node")
checkNode, err := exec.Command("bash", "-c", `oc adm top node | awk '{print $2,$3,$4,$5}'`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(checkNode).NotTo(o.ContainSubstring("-"))
exutil.By("check on pod under specific namespace")
checkNs, err := exec.Command("bash", "-c", `oc -n openshift-monitoring adm top pod | awk '{print $2,$3}'`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(checkNs).NotTo(o.ContainSubstring("-"))
})
| |||||
test case
|
openshift/openshift-tests-private
|
75136885-7fc9-4b33-a673-99e398c81080
|
ConnectedOnly-Author:tagao-LEVEL0-Medium-55696-add telemeter alert TelemeterClientFailures
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("ConnectedOnly-Author:tagao-LEVEL0-Medium-55696-add telemeter alert TelemeterClientFailures", func() {
exutil.By("check telemetry prometheusrule exists")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("prometheusrules", "telemetry", "-n", "openshift-monitoring").Output()
// Error from server (NotFound): prometheusrules.monitoring.coreos.com "telemetry" not found
if strings.Contains(output, `"telemetry" not found`) {
e2e.Logf("output: %s", output)
g.Skip("this env does not have telemetry prometheusrule, skip the case")
}
exutil.By("check TelemeterClientFailures alert is added")
output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("prometheusrules", "telemetry", "-ojsonpath={.spec.groups}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("TelemeterClientFailures"))
})
| |||||
test case
|
openshift/openshift-tests-private
|
e5e7824c-310a-4c86-a0a2-1bcd28b10355
|
Author:juzhao-Medium-62092-Don't fire NodeFilesystemAlmostOutOfSpace alert for certain tmpfs mount points
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:juzhao-Medium-62092-Don't fire NodeFilesystemAlmostOutOfSpace alert for certain tmpfs mount points", func() {
exutil.By("check NodeFilesystemAlmostOutOfSpace alert from node-exporter-rules prometheusrules")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("prometheusrules", "node-exporter-rules", `-ojsonpath={.spec.groups[*].rules[?(@.alert=="NodeFilesystemAlmostOutOfSpace")].expr}`, "-n", "openshift-monitoring").Output()
e2e.Logf("NodeFilesystemAlmostOutOfSpace alert expr: %v", output)
exutil.By("mountpoint /var/lib/ibmc-s3fs.* is excluded")
o.Expect(output).To(o.ContainSubstring(`mountpoint!~"/var/lib/ibmc-s3fs.*"`))
})
| ||||||
test case
|
openshift/openshift-tests-private
|
8d765ac8-395b-4ba3-815a-b901f6036f78
|
Author:tagao-Medium-48350-create alert-routing-edit role to allow end users to manage alerting CR
|
['"path/filepath"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Medium-48350-create alert-routing-edit role to allow end users to manage alerting CR", func() {
var (
alertManagerConfig = filepath.Join(monitoringBaseDir, "valid-alertmanagerconfig.yaml")
)
exutil.By("check clusterrole alert-routing-edit exists")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterrole").Output()
o.Expect(strings.Contains(output, "alert-routing-edit")).To(o.BeTrue())
exutil.By("create project, add alert-routing-edit RoleBinding to specific user")
oc.SetupProject()
ns := oc.Namespace()
err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-role-to-user", "-n", ns, "alert-routing-edit", oc.Username()).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("create AlertmanagerConfig under the project")
createResourceFromYaml(oc, ns, alertManagerConfig)
exutil.By("check AlertmanagerConfig is created")
output, _ = oc.WithoutNamespace().Run("get").Args("AlertmanagerConfig", "-n", ns).Output()
o.Expect(output).To(o.ContainSubstring("valid-test-config"))
exutil.By("the user should able to change AlertmanagerConfig")
err = oc.WithoutNamespace().Run("patch").Args("AlertmanagerConfig", "valid-test-config", "-p", `{"spec":{"receivers":[{"name":"webhook","webhookConfigs":[{"url":"https://test.io/push"}]}]}}`, "--type=merge", "-n", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check AlertmanagerConfig is updated")
output, _ = oc.WithoutNamespace().Run("get").Args("AlertmanagerConfig", "valid-test-config", "-ojsonpath={.spec.receivers}", "-n", ns).Output()
o.Expect(output).To(o.ContainSubstring("https://test.io/push"))
exutil.By("the user should able to delete AlertmanagerConfig")
err = oc.WithoutNamespace().Run("delete").Args("AlertmanagerConfig", "valid-test-config", "-n", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check AlertmanagerConfig is deleted")
output, _ = oc.WithoutNamespace().Run("get").Args("AlertmanagerConfig", "-n", ns).Output()
o.Expect(output).NotTo(o.ContainSubstring("valid-test-config"))
})
| |||||
test case
|
openshift/openshift-tests-private
|
62bb12cc-49fe-432f-9bd3-79af9d20c99d
|
Author:juzhao-Low-62957-Prometheus and Alertmanager should configure ExternalURL correctly
|
['"strings"', 'g "github.com/onsi/ginkgo/v2"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:juzhao-Low-62957-Prometheus and Alertmanager should configure ExternalURL correctly", func() {
exutil.By("skip the case if there is not console operator enabled")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperators", "console").Output()
// Error from server (NotFound): clusteroperators.config.openshift.io "console" not found
if strings.Contains(output, `"console" not found`) {
e2e.Logf("output: %s", output)
g.Skip("this cluster does not have console clusteroperator, skip the case")
}
exutil.By("get console route")
consoleURL, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("route", "console", `-ojsonpath={.spec.host}`, "-n", "openshift-console").Output()
e2e.Logf("console route is: %v", consoleURL)
exutil.By("get externalUrl for alertmanager main")
alertExternalUrl, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("alertmanager", "main", `-ojsonpath={.spec.externalUrl}`, "-n", "openshift-monitoring").Output()
e2e.Logf("alertmanager main externalUrl is: %v", alertExternalUrl)
o.Expect(alertExternalUrl).To(o.ContainSubstring("https://" + consoleURL))
exutil.By("get externalUrl for prometheus k8s")
prometheusExternalUrl, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("prometheus", "k8s", `-ojsonpath={.spec.externalUrl}`, "-n", "openshift-monitoring").Output()
e2e.Logf("prometheus k8s externalUrl is: %v", prometheusExternalUrl)
o.Expect(prometheusExternalUrl).To(o.ContainSubstring("https://" + consoleURL))
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("check from alertmanager API, the generatorURL should include https://${consoleURL}")
checkMetric(oc, `https://alertmanager-main.openshift-monitoring.svc:9094/api/v2/alerts?&filter={alertname="Watchdog"}`, token, `"generatorURL":"https://`+consoleURL, 2*platformLoadTime)
})
| |||||
test case
|
openshift/openshift-tests-private
|
a8a35df0-b774-47ba-8e20-b1662a3e42cc
|
Author:tagao-Medium-48942-validation for scrapeTimeout and relabel configs
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Medium-48942-validation for scrapeTimeout and relabel configs", func() {
var (
invalidServiceMonitor = filepath.Join(monitoringBaseDir, "invalid-ServiceMonitor.yaml")
)
exutil.By("delete test ServiceMonitor at the end of case")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("servicemonitor", "console-test-monitoring", "-n", "openshift-monitoring").Execute()
exutil.By("create one ServiceMonitor, set scrapeTimeout bigger than scrapeInterval, and no targetLabel setting")
createResourceFromYaml(oc, "openshift-monitoring", invalidServiceMonitor)
exutil.By("able to see error in prometheus-operator logs")
checkLogWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/name=prometheus-operator", "prometheus-operator", `error="scrapeTimeout \"120s\" greater than scrapeInterval \"30s\""`, true)
exutil.By("check the configuration is not loaded to prometheus")
checkPrometheusConfig(oc, "openshift-monitoring", "prometheus-k8s-0", `serviceMonitor/openshift-monitoring/console-test-monitoring/0`, false)
exutil.By("edit ServiceMonitor, and set value for scrapeTimeout less than scrapeInterval")
//oc patch servicemonitor console-test-monitoring --type='json' -p='[{"op": "replace", "path": "/spec/endpoints/0/scrapeTimeout", "value":"20s"}]' -n openshift-monitoring
patchConfig := `[{"op": "replace", "path": "/spec/endpoints/0/scrapeTimeout", "value":"20s"}]`
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("servicemonitor", "console-test-monitoring", "-p", patchConfig, "--type=json", "-n", "openshift-monitoring").Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
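// scrapeTimeout is valid now, but the relabel config still has no targetLabel, so prometheus-operator should keep rejecting the ServiceMonitor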
exutil.By("able to see error for missing targetLabel in prometheus-operator logs")
checkLogWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/name=prometheus-operator", "prometheus-operator", `relabel configuration for replace action needs targetLabel value`, true)
exutil.By("add targetLabel to ServiceMonitor")
//oc -n openshift-monitoring patch servicemonitor console-test-monitoring --type='json' -p='[{"op": "add", "path": "/spec/endpoints/0/relabelings/0/targetLabel", "value": "namespace"}]'
patchConfig = `[{"op": "add", "path": "/spec/endpoints/0/relabelings/0/targetLabel", "value": "namespace"}]`
patchErr = oc.AsAdmin().WithoutNamespace().Run("patch").Args("servicemonitor", "console-test-monitoring", "-p", patchConfig, "--type=json", "-n", "openshift-monitoring").Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.By("check the configuration loaded to prometheus")
checkPrometheusConfig(oc, "openshift-monitoring", "prometheus-k8s-0", "serviceMonitor/openshift-monitoring/console-test-monitoring/0", true)
})
| |||||
test case
|
openshift/openshift-tests-private
|
1b54628f-8d64-4abf-a043-351892a3685f
|
Author:juzhao-Medium-62636-Graduate alert overrides and alert relabelings to GA
|
['"path/filepath"', 'g "github.com/onsi/ginkgo/v2"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:juzhao-Medium-62636-Graduate alert overrides and alert relabelings to GA", func() {
var (
alertingRule = filepath.Join(monitoringBaseDir, "alertingRule.yaml")
alertRelabelConfig = filepath.Join(monitoringBaseDir, "alertRelabelConfig.yaml")
)
exutil.By("delete the created AlertingRule/AlertRelabelConfig at the end of the case")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("AlertingRule", "monitoring-example", "-n", "openshift-monitoring").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("AlertRelabelConfig", "monitoring-watchdog", "-n", "openshift-monitoring").Execute()
exutil.By("check AlertingRule/AlertRelabelConfig apiVersion is v1")
_, explainErr := oc.WithoutNamespace().AsAdmin().Run("explain").Args("AlertingRule", "--api-version=monitoring.openshift.io/v1").Output()
o.Expect(explainErr).NotTo(o.HaveOccurred())
_, explainErr = oc.WithoutNamespace().AsAdmin().Run("explain").Args("AlertRelabelConfig", "--api-version=monitoring.openshift.io/v1").Output()
o.Expect(explainErr).NotTo(o.HaveOccurred())
exutil.By("create AlertingRule/AlertRelabelConfig under openshift-monitoring")
createResourceFromYaml(oc, "openshift-monitoring", alertingRule)
createResourceFromYaml(oc, "openshift-monitoring", alertRelabelConfig)
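// the fixtures (assumed) define an always-firing ExampleAlert via the AlertingRule and relabel the Watchdog severity from none to critical via the AlertRelabelConfig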
exutil.By("check AlertingRule/AlertRelabelConfig are created")
output, _ := oc.WithoutNamespace().Run("get").Args("AlertingRule/monitoring-example", "-ojsonpath={.metadata.name}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("monitoring-example"))
output, _ = oc.WithoutNamespace().Run("get").Args("AlertRelabelConfig/monitoring-watchdog", "-ojsonpath={.metadata.name}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("monitoring-watchdog"))
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("check the alert defined in AlertingRule could be found in thanos-querier API")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertname="ExampleAlert"}'`, token, `"alertname":"ExampleAlert"`, 2*platformLoadTime)
exutil.By("Watchdog alert, the alert label is changed from \"severity\":\"none\" to \"severity\":\"critical\" in alertmanager API")
checkMetric(oc, `https://alertmanager-main.openshift-monitoring.svc:9094/api/v2/alerts?&filter={alertname="Watchdog"}`, token, `"severity":"critical"`, 2*platformLoadTime)
})
| |||||
test case
|
openshift/openshift-tests-private
|
85bcfa33-f9c2-41b0-868d-3e04284d1d42
|
Author:tagao-Low-67008-node-exporter: disable btrfs collector
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Low-67008-node-exporter: disable btrfs collector", func() {
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("should not see btrfs collector related metrics")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_scrape_collector_success{collector="btrfs"}'`, token, "\"result\":[]", uwmLoadTime)
exutil.By("check btrfs collector is disabled by default")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("daemonset.apps/node-exporter", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"node-exporter\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("no-collector.btrfs"))
})
| ||||||
test case
|
openshift/openshift-tests-private
|
0f0e36b2-c67c-4e27-9901-db73becd90b7
|
Author:tagao-LEVEL0-Medium-68292-Limit the value of GOMAXPROCS on node-exporter to 4
|
['"fmt"', '"os/exec"', '"strconv"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-LEVEL0-Medium-68292-Limit the value of GOMAXPROCS on node-exporter to 4", func() {
exutil.By("check the gomaxprocs value in logs")
// % oc -n openshift-monitoring logs -l app.kubernetes.io/name=node-exporter --tail=-1 -c node-exporter | grep -o 'gomaxprocs=[0-9]*' | uniq | cut -d= -f2
nodeExporterLogs, errLogs := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-l", "app.kubernetes.io/name=node-exporter", "--tail=-1", "-c", "node-exporter", "-n", "openshift-monitoring").OutputToFile("OCP-68292_nodeExporter.log")
o.Expect(errLogs).NotTo(o.HaveOccurred())
cmd := fmt.Sprintf(`cat %v | grep -o '%s' | uniq | cut -d= -f2`, nodeExporterLogs, "gomaxprocs=[0-9]*")
gomaxprocsValue, err := exec.Command("bash", "-c", cmd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("gomaxprocsValue output: %s", gomaxprocsValue)
// trim the trailing newline from the shell output so the conversion does not silently fail and return 0
gomaxprocsNum, parseErr := strconv.Atoi(strings.TrimSpace(string(gomaxprocsValue)))
o.Expect(parseErr).NotTo(o.HaveOccurred())
o.Expect(gomaxprocsNum).To(o.BeNumerically("<=", 4))
})
| |||||
test case
|
openshift/openshift-tests-private
|
6775644a-818e-4bfa-8495-f5dbc44bf19b
|
Author:juzhao-Low-68958-node_exporter shouldn't collect metrics for Calico Virtual NICs
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:juzhao-Low-68958-node_exporter shouldn't collect metrics for Calico Virtual NICs", func() {
exutil.By("Get token of SA prometheus-k8s")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
exutil.By("should not see metrics for Calico Virtual NICs")
checkMetric(oc, `https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=node_network_info{device=~"cali.*"}'`, token, "\"result\":[]", uwmLoadTime)
})
| ||||||
test case
|
openshift/openshift-tests-private
|
2c2c1563-783b-446e-9d8e-1537c2d32001
|
Author:tagao-Medium-69087-Replace OAuth-proxy container with kube-rbac-proxy in Thanos-Querier pod
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Medium-69087-Replace OAuth-proxy container with kube-rbac-proxy in Thanos-Querier pod", func() {
exutil.By("check role added")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("role", "cluster-monitoring-metrics-api", "-n", "openshift-monitoring").Output()
o.Expect(output).NotTo(o.ContainSubstring("NotFound"))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check cluster role added")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterRole", "cluster-monitoring-view", "-ojsonpath={.rules}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("monitoring.coreos.com"))
o.Expect(err).NotTo(o.HaveOccurred())
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterRole", "prometheus-k8s", "-ojsonpath={.rules[?(\"monitoring.coreos.com\")]}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("monitoring.coreos.com"))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check thanos-querier deployment")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deploy", "thanos-querier", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"kube-rbac-proxy-web\")].args}", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("kube-rbac-proxy/config.yaml"))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check thanos-querier secret")
// should see `thanos-querier-kube-rbac-proxy-web` is added, and `thanos-querier-oauth-cookie` is removed
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", "thanos-querier-kube-rbac-proxy-web", "-n", "openshift-monitoring").Output()
o.Expect(output).NotTo(o.ContainSubstring("NotFound"))
o.Expect(err).NotTo(o.HaveOccurred())
output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", "thanos-querier-oauth-cookie", "-n", "openshift-monitoring").Output()
o.Expect(output).To(o.ContainSubstring("NotFound"))
exutil.By("Get token of current user")
token := oc.UserConfig().BearerToken
exutil.By("Get route of thanos-querier")
host, hostErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("route", "thanos-querier", "-ojsonpath={.spec.host}", "-n", "openshift-monitoring").Output()
o.Expect(hostErr).NotTo(o.HaveOccurred())
exutil.By("test role can NOT access to ThanosQuerier")
// % curl -H "Authorization: Bearer $token" -k "https://$host/api/v1/query?" --data-urlencode 'query=up{namespace="openshift-monitoring"}'
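// without the cluster-monitoring-metrics-api role bound, kube-rbac-proxy-web should reject the query with Forbidden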
checkMetric(oc, "https://"+host+"/api/v1/query? --data-urlencode 'query=up{namespace=\"openshift-monitoring\"}'", token, "Forbidden", 2*platformLoadTime)
exutil.By("add role access to ThanosQuerier")
admErr := oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-role-to-user", "--role-namespace=openshift-monitoring", "-n", "openshift-monitoring", "cluster-monitoring-metrics-api", oc.Username()).Execute()
o.Expect(admErr).NotTo(o.HaveOccurred())
exutil.By("test role access to ThanosQuerier")
// % curl -H "Authorization: Bearer $token" -k "https://$host/api/v1/query?" --data-urlencode 'query=up{namespace="openshift-monitoring"}'
checkMetric(oc, "https://"+host+"/api/v1/query? --data-urlencode 'query=up{namespace=\"openshift-monitoring\"}'", token, "up", 2*platformLoadTime)
})
| ||||||
test case
|
openshift/openshift-tests-private
|
efae0a02-8495-4b46-ac1f-906a21a06842
|
Author:juzhao-Medium-69924-Set scrape.timestamp tolerance for prometheus
|
['"context"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:juzhao-Medium-69924-Set scrape.timestamp tolerance for prometheus", func() {
exutil.By("confirm in-cluster prometheus is created")
err := wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 180*time.Second, false, func(context.Context) (bool, error) {
prometheus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("prometheus", "k8s", "-n", "openshift-monitoring").Output()
if err != nil || strings.Contains(prometheus, "not found") {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, "in-cluster prometheus is not created")
exutil.By("check in-cluster prometheus scrape.timestamp tolerance")
cmd := `-ojsonpath={.spec.additionalArgs[?(@.name=="scrape.timestamp-tolerance")]}`
checkYamlconfig(oc, "openshift-monitoring", "prometheus", "k8s", cmd, `"value":"15ms"`, true)
//check settings in prometheus pods
podNames, err := exutil.GetAllPodsWithLabel(oc, "openshift-monitoring", "app.kubernetes.io/name=prometheus")
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range podNames {
cmd := "-ojsonpath={.spec.containers[?(@.name==\"prometheus\")].args}"
checkYamlconfig(oc, "openshift-monitoring", "pod", pod, cmd, `--scrape.timestamp-tolerance=15ms`, true)
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
69226df7-967f-47ca-9568-38264ff2da32
|
Author:juzhao-Medium-70051-Adjust NodeClock alerting rules to be inactive when the PTP operator is installed
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:juzhao-Medium-70051-Adjust NodeClock alerting rules to be inactive when the PTP operator is installed", func() {
exutil.By("check NodeClockSkewDetected alert expr")
cmd := "-ojsonpath={.spec.groups[*].rules[?(@.alert==\"NodeClockSkewDetected\")].expr}"
checkYamlconfig(oc, "openshift-monitoring", "prometheusrules", "node-exporter-rules", cmd, `absent(up{job="ptp-monitor-service"})`, true)
exutil.By("check NodeClockNotSynchronising alert expr")
cmd = "-ojsonpath={.spec.groups[*].rules[?(@.alert==\"NodeClockNotSynchronising\")].expr}"
checkYamlconfig(oc, "openshift-monitoring", "prometheusrules", "node-exporter-rules", cmd, `absent(up{job="ptp-monitor-service"})`, true)
})
| ||||||
test case
|
openshift/openshift-tests-private
|
613a09b0-9770-4626-bf5d-fed1de3b3e4c
|
Author:juzhao-Medium-69927-Allow to query alerts of application namespaces as an application user from command line
|
['"context"', '"os/exec"', '"regexp"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:juzhao-Medium-69927-Allow to query alerts of application namespaces as an application user from command line", func() {
_, err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-user", "cluster-admin", oc.Username()).Output()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-cluster-role-from-user", "cluster-admin", oc.Username()).Execute()
podNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", "openshift-monitoring", "-l", "app.kubernetes.io/name=prometheus", "--ignore-not-found", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// double check prometheus pods are Running
for _, pod := range strings.Fields(podNames) {
assertPodToBeReady(oc, pod, "openshift-monitoring")
}
podNames, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", "openshift-monitoring", "-l", "app.kubernetes.io/name=thanos-query", "--ignore-not-found", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// double check thanos-querier pods are Running
for _, pod := range strings.Fields(podNames) {
assertPodToBeReady(oc, pod, "openshift-monitoring")
}
exutil.By("get user API token")
token, _ := oc.Run("whoami").Args("-t").Output()
exutil.By("Run port-forward command")
cmd, _, _, err := oc.AsAdmin().WithoutNamespace().Run("port-forward").Args("-n", "openshift-monitoring", "service/thanos-querier", "9093:9093").Background()
o.Expect(err).NotTo(o.HaveOccurred())
defer cmd.Process.Kill()
output, err := exec.Command("bash", "-c", "ps -ef | grep 9093").Output()
e2e.Logf("output is: %s", output)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("curl without namespace parameter should return Bad Request")
curlcmd := "curl -G -k -s -H \"Authorization:Bearer " + token + "\" " + "https://127.0.0.1:9093/api/v1/alerts"
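// port 9093 on thanos-querier is the tenancy-aware endpoint, so a request without a namespace parameter should be rejected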
err = wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 30*time.Second, false, func(context.Context) (bool, error) {
output, err := exec.Command("bash", "-c", curlcmd).Output()
e2e.Logf("output is: %s", output)
if err != nil {
e2e.Logf("failed to execute the curl: %s. Trying again", err)
return false, nil
}
if matched, _ := regexp.MatchString("Bad Request", string(output)); matched {
e2e.Logf("Bad Request. The request or configuration is malformed\n")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "failed to curl without namespace parameter")
exutil.By("curl with namespace parameter should return alerts")
err = wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 30*time.Second, false, func(context.Context) (bool, error) {
output, err := exec.Command("bash", "-c", curlcmd+"?namespace=openshift-monitoring").Output()
e2e.Logf("output is: %s", output)
if err != nil {
e2e.Logf("failed to execute the curl: %s. Trying again", err)
return false, nil
}
if matched, _ := regexp.MatchString(`"alertname":"Watchdog"`, string(output)); matched {
e2e.Logf("curl with namespace parameter returns Watchdog alert\n")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "Cannot get result with namespace parameter")
})
| |||||
test case
|
openshift/openshift-tests-private
|
035e1cd6-4a36-4b54-8541-a69dc3796077
|
Author:tagao-Medium-69195-Replace OAuth-proxy container with Kube-RBAC-proxy in Prometheus pod
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Medium-69195-Replace OAuth-proxy container with Kube-RBAC-proxy in Prometheus pod", func() {
exutil.By("check prometheus-k8s-kube-rbac-proxy-web added")
checkSecret, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", "prometheus-k8s-kube-rbac-proxy-web", "-n", "openshift-monitoring").Output()
o.Expect(checkSecret).NotTo(o.ContainSubstring("not found"))
exutil.By("check secret prometheus-k8s-proxy removed")
checkSecret, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", "prometheus-k8s-proxy", "-n", "openshift-monitoring").Output()
o.Expect(checkSecret).To(o.ContainSubstring("not found"))
exutil.By("check prometheus k8s configs, kube-rbac-proxy-web related configs should exist")
checkPrometheusK8s, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("prometheus", "k8s", "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy-web\")].ports}", "-n", "openshift-monitoring").Output()
o.Expect(checkPrometheusK8s).To(o.ContainSubstring("9091"))
o.Expect(checkPrometheusK8s).To(o.ContainSubstring("web"))
checkPrometheusK8s, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("prometheus", "k8s", "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy-web\")].volumeMounts}", "-n", "openshift-monitoring").Output()
o.Expect(checkPrometheusK8s).To(o.ContainSubstring("secret-prometheus-k8s-kube-rbac-proxy-web"))
checkPrometheusK8s, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("prometheus", "k8s", "-ojsonpath={.spec.secrets}", "-n", "openshift-monitoring").Output()
o.Expect(checkPrometheusK8s).To(o.ContainSubstring("prometheus-k8s-kube-rbac-proxy-web"))
exutil.By("check prometheus k8s pods, prometheus-proxy container is removed")
checkPO, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "prometheus-k8s-0", "-ojsonpath={.spec.containers[*].name}", "-n", "openshift-monitoring").Output()
o.Expect(checkPO).NotTo(o.ContainSubstring("prometheus-proxy"))
exutil.By("check prometheus-k8s servicemonitor, port should be keep at metrics")
checkSM, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("ServiceMonitor", "prometheus-k8s", "-ojsonpath={.spec.endpoints[]}", "-n", "openshift-monitoring").Output()
o.Expect(checkSM).To(o.ContainSubstring(`"port":"metrics"`))
exutil.By("check telemeter-client deploy")
checkTL, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("deploy", "telemeter-client", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"telemeter-client\")].env[?(@.name==\"FROM\")]}", "-n", "openshift-monitoring").Output()
if !strings.Contains(checkTL, `"telemeter-client" not found`) {
o.Expect(checkTL).To(o.ContainSubstring(`"value":"https://prometheus-k8s.openshift-monitoring.svc:9091"`))
}
exutil.By("check secret thanos-querier-kube-rbac-proxy-metrics")
checkSecret, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", "thanos-querier-kube-rbac-proxy-metrics", "-ojsonpath={.metadata.labels}", "-n", "openshift-monitoring").Output()
o.Expect(checkSecret).To(o.ContainSubstring(`"app.kubernetes.io/component":"query-layer"`))
o.Expect(checkSecret).To(o.ContainSubstring(`"app.kubernetes.io/instance":"thanos-querier"`))
exutil.By("check secret thanos-querier-kube-rbac-proxy-web")
checkSecret, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", "thanos-querier-kube-rbac-proxy-web", "-ojsonpath={.metadata.labels}", "-n", "openshift-monitoring").Output()
o.Expect(checkSecret).To(o.ContainSubstring(`"app.kubernetes.io/component":"query-layer"`))
o.Expect(checkSecret).To(o.ContainSubstring(`"app.kubernetes.io/instance":"thanos-querier"`))
exutil.By("test role access to prometheus-k8s")
exutil.By("Get token of current user")
token := oc.UserConfig().BearerToken
exutil.By("Get route of prometheus-k8s")
host, hostErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("route", "prometheus-k8s", "-ojsonpath={.spec.host}", "-n", "openshift-monitoring").Output()
o.Expect(hostErr).NotTo(o.HaveOccurred())
exutil.By("test role can NOT access to prometheus-k8s")
// % curl -H "Authorization: Bearer $token" -k "https://$host/api/v1/query?" --data-urlencode 'query=up{namespace="openshift-monitoring"}'
checkMetric(oc, "https://"+host+"/api/v1/query? --data-urlencode 'query=up{namespace=\"openshift-monitoring\"}'", token, "Forbidden", 2*platformLoadTime)
exutil.By("add role access to prometheus-k8s")
admErr := oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-role-to-user", "--role-namespace=openshift-monitoring", "-n", "openshift-monitoring", "cluster-monitoring-metrics-api", oc.Username()).Execute()
o.Expect(admErr).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-role-from-user", "--role-namespace=openshift-monitoring", "-n", "openshift-monitoring", "cluster-monitoring-metrics-api", oc.Username()).Execute()
exutil.By("test role access to prometheus-k8s")
// % curl -H "Authorization: Bearer $token" -k "https://$host/api/v1/query?" --data-urlencode 'query=up{namespace="openshift-monitoring"}'
checkMetric(oc, "https://"+host+"/api/v1/query? --data-urlencode 'query=up{namespace=\"openshift-monitoring\"}'", token, "up", 2*platformLoadTime)
})
| |||||
test case
|
openshift/openshift-tests-private
|
5f907bf6-670a-4f34-a725-9e3ed025a1f3
|
Author:tagao-Medium-72560-Replace oauth-proxy container with kube-rbac-proxy in Alertmanager pods
|
['g "github.com/onsi/ginkgo/v2"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Medium-72560-Replace oauth-proxy container with kube-rbac-proxy in Alertmanager pods", func() {
exutil.By("check new configs added to alertmanager main")
checkAlertmanager, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("alertmanager", "main", "-ojsonpath={.spec.containers[?(@.name==\"kube-rbac-proxy-web\")]}", "-n", "openshift-monitoring").Output()
o.Expect(checkAlertmanager).To(o.ContainSubstring(`"--secure-listen-address=0.0.0.0:9095"`))
o.Expect(checkAlertmanager).To(o.ContainSubstring(`"--upstream=http://127.0.0.1:9093"`))
o.Expect(checkAlertmanager).To(o.ContainSubstring(`"--config-file=/etc/kube-rbac-proxy/config.yaml"`))
o.Expect(checkAlertmanager).To(o.ContainSubstring(`"name":"kube-rbac-proxy-web"`))
o.Expect(checkAlertmanager).To(o.ContainSubstring(`"mountPath":"/etc/kube-rbac-proxy"`))
o.Expect(checkAlertmanager).To(o.ContainSubstring(`"name":"secret-alertmanager-kube-rbac-proxy-web"`))
exutil.By("check new secret added and old one removed")
checkSecret, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", "alertmanager-kube-rbac-proxy-web", "-n", "openshift-monitoring").Output()
o.Expect(checkSecret).NotTo(o.ContainSubstring("not found"))
checkSecret, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", "alertmanager-main-proxy", "-n", "openshift-monitoring").Output()
o.Expect(checkSecret).To(o.ContainSubstring("not found"))
exutil.By("check alertmanager pods, alertmanager-proxy container is removed")
checkPO, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "alertmanager-main-0", "-ojsonpath={.spec.containers[*].name}", "-n", "openshift-monitoring").Output()
o.Expect(checkPO).NotTo(o.ContainSubstring("alertmanager-proxy"))
exutil.By("check role, monitoring-alertmanager-edit add new resourceNames")
checkRole, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("role", "monitoring-alertmanager-edit", "-ojsonpath={.rules}", "-n", "openshift-monitoring").Output()
o.Expect(checkRole).To(o.ContainSubstring(`"resourceNames":["main"]`))
o.Expect(checkRole).To(o.ContainSubstring(`"resources":["alertmanagers/api"]`))
o.Expect(checkRole).To(o.ContainSubstring(`"verbs":["*"]`))
exutil.By("test user access to alertmanager")
exutil.By("Get token of current user")
token := oc.UserConfig().BearerToken
exutil.By("Get route of alertmanager-main")
host, hostErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("route", "alertmanager-main", "-ojsonpath={.spec.host}", "-n", "openshift-monitoring").Output()
o.Expect(hostErr).NotTo(o.HaveOccurred())
exutil.By("test role can NOT access to alertmanager")
// % curl -H "Authorization: Bearer $TOKEN" -k "https://$HOST/api/v2/receivers"
checkMetric(oc, "https://"+host+"/api/v2/receivers", token, "Forbidden", 2*platformLoadTime)
exutil.By("add role access to alertmanager")
admErr := oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-role-to-user", "--role-namespace=openshift-monitoring", "-n", "openshift-monitoring", "monitoring-alertmanager-edit", oc.Username()).Execute()
o.Expect(admErr).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-role-from-user", "--role-namespace=openshift-monitoring", "-n", "openshift-monitoring", "monitoring-alertmanager-edit", oc.Username()).Execute()
exutil.By("test role access to alertmanager")
// % curl -H "Authorization: Bearer $TOKEN" -k "https://$HOST/api/v2/receivers"
checkMetric(oc, "https://"+host+"/api/v2/receivers", token, `"name":"Watchdog"`, 2*platformLoadTime)
})
| |||||
test case
|
openshift/openshift-tests-private
|
a8b1c8cf-b025-4c26-8828-89f910609fff
|
Author:juzhao-Medium-73294-add role.rbac.authorization.k8s.io/monitoring-alertmanager-view
|
['"os/exec"', '"strings"', '"time"', 'g "github.com/onsi/ginkgo/v2"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:juzhao-Medium-73294-add role.rbac.authorization.k8s.io/monitoring-alertmanager-view", func() {
exutil.By("Check monitoring-alertmanager-view role is created")
err := oc.AsAdmin().WithoutNamespace().Run("get").Args("role", "monitoring-alertmanager-view", "-n", "openshift-monitoring").Execute()
if err != nil {
e2e.Logf("Unable to get role monitoring-alertmanager-view.")
}
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Bind monitoring-alertmanager-view role to user")
admErr := oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-role-to-user", "--role-namespace=openshift-monitoring", "-n", "openshift-monitoring", "monitoring-alertmanager-view", oc.Username()).Execute()
o.Expect(admErr).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-role-from-user", "--role-namespace=openshift-monitoring", "-n", "openshift-monitoring", "monitoring-alertmanager-view", oc.Username()).Execute()
exutil.By("Get alertmanager-main route")
host, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("route", "alertmanager-main", "-ojsonpath={.spec.host}", "-n", "openshift-monitoring").Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Get token of current user")
token := oc.UserConfig().BearerToken
exutil.By("Check monitoring-alertmanager-view role can view receivers and alerts API")
checkMetric(oc, "https://"+host+"/api/v2/receivers", token, "Watchdog", 2*platformLoadTime)
checkMetric(oc, "https://"+host+"/api/v2/alerts?&filter={alertname=\"Watchdog\"}", token, "Watchdog", 2*platformLoadTime)
exutil.By("Check monitoring-alertmanager-view role can not silence alert")
currentTime := time.Now()
start := time.Now().UTC().Format("2006-01-02T15:04:05Z")
twoHoursLater := currentTime.Add(2 * time.Hour)
end := twoHoursLater.UTC().Format("2006-01-02T15:04:05Z")
// % curl -k -H "Authorization: Bearer $token" -X POST -d '{"matchers":[{"name":"alertname","value":"Watchdog"}],"startsAt":"'"$start"'","endsAt":"'"$end"'","createdBy":"testuser","comment":"Silence Watchdog alert"}' https://$HOST/api/v2/silences
curlCmd := `curl -k -H "Authorization: Bearer ` + token + `" -X POST -d '{"matchers":[{"name":"alertname","value":"Watchdog"}],"startsAt":"` + start + `","endsAt":"` + end + `","createdBy":"testuser","comment":"Silence Watchdog alert"}' "https://` + host + `/api/v2/silences"`
out, err := exec.Command("bash", "-c", curlCmd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(string(out), "Forbidden")).Should(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
ee55bcaa-72d2-4e5c-8c55-e27b8a6dd9bd
|
Author:juzhao-Medium-73288-Enable request headers flags for metrics server
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:juzhao-Medium-73288-Enable request headers flags for metrics server", func() {
exutil.By("Check metrics-server deployment exists")
err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deploy", "metrics-server", "-n", "openshift-monitoring").Execute()
if err != nil {
e2e.Logf("Unable to find metrics-server deployment.")
}
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check request headers flags for metrics server")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("deploy/metrics-server", "-ojsonpath={.spec.template.spec.containers[?(@.name==\"metrics-server\")].args}", "-n", "openshift-monitoring").Output()
params := []string{"requestheader-client-ca-file", "requestheader-allowed-names", "requestheader-extra-headers-prefix", "requestheader-group-headers", "requestheader-username-headers"}
for _, param := range params {
o.Expect(output).To(o.ContainSubstring(param))
}
})
| ||||||
test case
|
openshift/openshift-tests-private
|
cb9fa849-e649-4365-8777-47ea953bf1ed
|
Author:tagao-Low-30088-User can not deploy ThanosRuler CRs in user namespaces [Serial]
|
['"context"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-Low-30088-User can not deploy ThanosRuler CRs in user namespaces [Serial]", func() {
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("create namespace as a common user (non-admin)")
oc.SetupProject()
ns := oc.Namespace()
exutil.By("check ThanosRuler can not be created")
currentUser, _ := oc.Run("whoami").Args("").Output()
e2e.Logf("current user is: %v", currentUser)
queryErr := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 60*time.Second, true, func(context.Context) (bool, error) {
permissionCheck, _ := oc.WithoutNamespace().Run("auth").Args("can-i", "create", "thanosrulers", "--as="+currentUser, "-n", ns).Output()
if !strings.Contains(permissionCheck, "yes") {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(queryErr, "permissionCheck still reports the user can create thanosrulers")
})
| |||||
test case
|
openshift/openshift-tests-private
|
6de29f52-986b-4817-a752-cc8922ecad3d
|
Author:tagao-NonPreRelease-Longduration-Medium-49191-Enforce body_size_limit [Serial]
|
github.com/openshift/openshift-tests-private/test/extended/monitoring/monitoring.go
|
g.It("Author:tagao-NonPreRelease-Longduration-Medium-49191-Enforce body_size_limit [Serial]", func() {
exutil.By("delete uwm-config/cm-config at the end of a serial case")
defer deleteConfig(oc, "user-workload-monitoring-config", "openshift-user-workload-monitoring")
defer deleteConfig(oc, monitoringCM.name, monitoringCM.namespace)
exutil.By("set `enforcedBodySizeLimit` to 0, and check from the k8s pod")
patchAndCheckBodySizeLimit(oc, "0", "0")
exutil.By("set `enforcedBodySizeLimit` to a invalid value, and check from the k8s pod")
patchAndCheckBodySizeLimit(oc, "20MiBPS", "")
exutil.By("set `enforcedBodySizeLimit` to 1MB to trigger PrometheusScrapeBodySizeLimitHit alert, and check from the k8s pod")
patchAndCheckBodySizeLimit(oc, "1MB", "1MB")
exutil.By("check PrometheusScrapeBodySizeLimitHit alert is triggered")
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertname="PrometheusScrapeBodySizeLimitHit"}'`, token, "PrometheusScrapeBodySizeLimitHit", 5*uwmLoadTime)
exutil.By("set `enforcedBodySizeLimit` to 40MB, and check from the k8s pod")
patchAndCheckBodySizeLimit(oc, "40MB", "40MB")
exutil.By("check from alert, should not have enforcedBodySizeLimit")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertname="PrometheusScrapeBodySizeLimitHit"}'`, token, `"result":[]`, 5*uwmLoadTime)
exutil.By("set `enforcedBodySizeLimit` to automatic, and check from the k8s pod")
patchAndCheckBodySizeLimit(oc, "automatic", "body_size_limit")
exutil.By("check from alert, should not have enforcedBodySizeLimit")
checkMetric(oc, `https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query --data-urlencode 'query=ALERTS{alertname="PrometheusScrapeBodySizeLimitHit"}'`, token, `"result":[]`, 5*uwmLoadTime)
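// For reference, a minimal sketch of the cluster-monitoring-config payload that
// patchAndCheckBodySizeLimit presumably applies (the helper is defined elsewhere;
// only the enforcedBodySizeLimit value is asserted by this test):
// data:
//   config.yaml: |
//     prometheusK8s:
//       enforcedBodySizeLimit: 1MB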
})
|