element_type (stringclasses, 4 values) | project_name (stringclasses, 1 value) | uuid (stringlengths, 36-36) | name (stringlengths, 0-346) | imports (stringlengths, 0-2.67k) | structs (stringclasses, 761 values) | interfaces (stringclasses, 22 values) | file_location (stringclasses, 545 values) | code (stringlengths, 26-8.07M) | global_vars (stringclasses, 7 values) | package (stringclasses, 124 values) | tags (stringclasses, 1 value)
---|---|---|---|---|---|---|---|---|---|---|---|
function
|
openshift/openshift-tests-private
|
c1ed9725-bdd0-471d-a2a5-e4e07f4f4a34
|
deleteNSUsingOCCLI
|
['"sync"']
|
github.com/openshift/openshift-tests-private/test/extended/perfscale/ocperf-util.go
|
func deleteNSUsingOCCLI(oc *exutil.CLI, namespace string, wg *sync.WaitGroup) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
wg.Done()
}
|
perfscale
| ||||
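The deleteNSUsingOCCLI helper above deletes a single namespace and then signals a shared sync.WaitGroup, so the test can fan namespace deletions out across goroutines and wait for all of them. Below is a minimal, self-contained sketch of that fan-out/wait pattern; deleteNamespace is a hypothetical stand-in that only simulates the work, since the real helper shells out through the oc CLI.

```go
package main

import (
	"fmt"
	"sync"
)

// deleteNamespace is a hypothetical stand-in for the oc-based helper above;
// it only prints, so the WaitGroup pattern stays runnable on its own.
func deleteNamespace(name string, wg *sync.WaitGroup) {
	defer wg.Done() // release the WaitGroup even if the work returns early
	fmt.Printf("deleting namespace %s\n", name)
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		ns := fmt.Sprintf("perfscale-demo-%d", i)
		wg.Add(1)                   // register before starting the goroutine
		go deleteNamespace(ns, &wg) // fan out one goroutine per namespace
	}
	wg.Wait() // block until every goroutine has called Done
}
```

Using defer wg.Done() keeps the WaitGroup balanced even on early returns; the original helper calls wg.Done() as its last statement instead.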
test
|
openshift/openshift-tests-private
|
56e25d45-03fe-4c25-a22a-512a7ba626fb
|
ocperf
|
import (
"fmt"
"sync"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/perfscale/ocperf.go
|
package perfscale
import (
"fmt"
"sync"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
// author: [email protected]
var _ = g.Describe("[sig-perfscale] PerfScale oc cli perf", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("perfscale-cli", exutil.KubeConfigPath())
ocpPerfAppDeployment string
ocpPerfAppService string
ocPerfAppImageName string
iaasPlatform string
isSNO bool
namespace string
projectCount int
)
g.BeforeEach(func() {
// get IaaS platform
iaasPlatform = exutil.CheckPlatform(oc)
e2e.Logf("Cloud provider is: %v", iaasPlatform)
ocpPerfAppDeployment = exutil.FixturePath("testdata", "perfscale", "oc-perf-deployment.yaml")
ocpPerfAppService = exutil.FixturePath("testdata", "perfscale", "oc-perf-service.yaml")
isSNO = exutil.IsSNOCluster(oc)
})
// author: [email protected]
g.It("Author:liqcui-Medium-22140-Create multiple projects and time various oc commands durations[Serial]", func() {
if isSNO {
g.Skip("Skip Testing on SNO ...")
}
var (
metricName string
metricValueBefore int
metricValueAfter int
)
mo, err := exutil.NewPrometheusMonitor(oc)
o.Expect(err).NotTo(o.HaveOccurred())
masterNodeNames, err := exutil.GetClusterNodesBy(oc, "control-plane")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(masterNodeNames).NotTo(o.BeEmpty())
for i := 0; i < len(masterNodeNames); i++ {
metricString := fmt.Sprintf(`container_memory_rss{container="kube-apiserver",namespace="openshift-kube-apiserver",node="%s"}`, masterNodeNames[i])
tagQueryParams := exutil.MonitorInstantQueryParams{Query: metricString}
metric4MemRSS := mo.InstantQueryWithRetry(tagQueryParams, 15)
metricName, metricValueBefore = exutil.ExtractSpecifiedValueFromMetricData4MemRSS(oc, metric4MemRSS)
e2e.Logf("The value of %s is %d on [%s].", metricName, metricValueBefore, masterNodeNames[i])
}
//Prefer the [tests] image; it is mirrored by default in disconnected clusters.
//If the [tests] image is not available in some environments, fall back to the hello-openshift image.
//The tests imagestream usually ships with every OCP release and is mirrored by default in disconnected clusters.
ocPerfAppImageName = getImagestreamImageName(oc, "tests")
if len(ocPerfAppImageName) == 0 {
ocPerfAppImageName = "quay.io/openshifttest/hello-openshift:multiarch"
}
e2e.Logf("ocp perfscale test case ocp-22140 will use the image below for testing:\n[Image Name]:%s", ocPerfAppImageName)
if iaasPlatform == "ibmcloud" {
projectCount = 25
} else if iaasPlatform == "aws" {
projectCount = 30
} else if iaasPlatform == "azure" || iaasPlatform == "gcp" {
projectCount = 35
} else {
projectCount = 40
}
start := time.Now()
g.By("Try to create projects and deployments")
randStr := exutil.RandStrCustomize("perfscaleqeoclixyz", 8)
nsPattern := randStr + "-%d"
var wg sync.WaitGroup
for i := 0; i < projectCount; i++ {
namespace := fmt.Sprintf(nsPattern, i)
wg.Add(1)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", namespace, "--ignore-not-found").Execute()
go createNSUsingOCCLI(oc, namespace, &wg)
}
wg.Wait() // Wait for all goroutines to finish
checkIfNSIsInExpectedState(oc, projectCount, randStr)
//var wg sync.WaitGroup
for i := 0; i < projectCount; i++ {
namespace = fmt.Sprintf(nsPattern, i)
wg.Add(1)
go createDeploymentServiceUsingOCCLI(oc, namespace, ocpPerfAppService, ocpPerfAppDeployment, ocPerfAppImageName, &wg)
}
wg.Wait()
for i := 0; i < projectCount; i++ {
namespace = fmt.Sprintf(nsPattern, i)
checkIfDeploymentIsInExpectedState(oc, namespace, "ocp-perfapp")
}
createDuration := time.Since(start).Seconds()
e2e.Logf("Duration for creating %d projects and 1 deploymentConfig in each of those is %.2f seconds", projectCount, createDuration)
start = time.Now()
g.By("Try to get deployment, sa, and secrets")
for i := 0; i < projectCount; i++ {
namespace = fmt.Sprintf(nsPattern, i)
wg.Add(1)
go getResourceUsingOCCLI(oc, namespace, &wg)
}
wg.Wait()
getDuration := time.Since(start).Seconds()
e2e.Logf("Duration for getting deployment, sa, and secrets in each of those is %.2f seconds", getDuration)
start = time.Now()
g.By("Try to scale the dc replicas to 0")
for i := 0; i < projectCount; i++ {
namespace = fmt.Sprintf(nsPattern, i)
wg.Add(1)
go scaleDownDeploymentUsingOCCLI(oc, namespace, &wg)
}
wg.Wait()
for i := 0; i < projectCount; i++ {
namespace = fmt.Sprintf(nsPattern, i)
checkIfDeploymentIsInExpectedState(oc, namespace, "ocp-perfapp")
}
scaleDuration := time.Since(start).Seconds()
e2e.Logf("Duration for scaling the dc replicas to 0 in each of those is %.2f seconds", scaleDuration)
start = time.Now()
g.By("Try to delete project")
for i := 0; i < projectCount; i++ {
namespace = fmt.Sprintf(nsPattern, i)
wg.Add(1)
go deleteNSUsingOCCLI(oc, namespace, &wg)
}
wg.Wait()
checkIfNSIsInExpectedState(oc, 0, randStr)
deleteDuration := time.Since(start).Seconds()
for i := 0; i < len(masterNodeNames); i++ {
metricString := fmt.Sprintf(`container_memory_rss{container="kube-apiserver",namespace="openshift-kube-apiserver",node="%s"}`, masterNodeNames[i])
tagQueryParams := exutil.MonitorInstantQueryParams{Query: metricString}
metric4MemRSS := mo.InstantQueryWithRetry(tagQueryParams, 15)
metricName, metricValueAfter = exutil.ExtractSpecifiedValueFromMetricData4MemRSS(oc, metric4MemRSS)
e2e.Logf("The value of %s is %d on [%s].", metricName, metricValueAfter, masterNodeNames[i])
if metricValueAfter > metricValueBefore {
e2e.Logf("The value of %s increased from %d to %d on [%s].", metricName, metricValueBefore, metricValueAfter, masterNodeNames[i])
}
//Must stay lower than 3GB = 3*1024*1024*1024 = 3221225472 bytes; the limit allows for higher memory usage in 4.16
o.Expect(metricValueAfter).To(o.BeNumerically("<=", 3221225472))
}
// e2e.Logf("Duration for deleting %d projects and 1 deploymentConfig in each of those is %.2f seconds", projectCount, deleteDuration)
// all values in BeNumerically are "Expected" and "Threshold" numbers
// Expected was derived by running this program 5 times against 4.8.0-0.nightly-2021-10-20-155651 and taking the median
// Threshold is set lower than the expected value
e2e.Logf("createDuration is: %v Expected time is less than 360s.", createDuration)
e2e.Logf("getDuration is: %v Expected time is less than 120s.", getDuration)
e2e.Logf("scaleDuration is: %v Expected time is less than 120s.", scaleDuration)
e2e.Logf("deleteDuration is: %v Expected time is less than 300s.", deleteDuration)
o.Expect(createDuration).To(o.BeNumerically("<=", 360))
o.Expect(getDuration).To(o.BeNumerically("<=", 120))
o.Expect(scaleDuration).To(o.BeNumerically("<=", 120))
o.Expect(deleteDuration).To(o.BeNumerically("<=", 300))
})
})
|
package perfscale
| ||||
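Each phase of the test above (create, get, scale, delete) is timed with time.Now/time.Since and compared against a per-phase budget (360s, 120s, 120s, 300s). The sketch below shows that timing pattern in isolation; timePhase is a hypothetical helper and the sleep stands in for the real oc workload.

```go
package main

import (
	"fmt"
	"time"
)

// timePhase runs fn, reports its wall-clock duration in seconds, and says
// whether it stayed within the given budget. It mirrors the createDuration /
// getDuration / scaleDuration / deleteDuration checks, but returns a plain
// bool instead of making a Gomega assertion.
func timePhase(name string, budgetSeconds float64, fn func()) (float64, bool) {
	start := time.Now()
	fn()
	elapsed := time.Since(start).Seconds()
	fmt.Printf("%s took %.2fs (budget %.0fs)\n", name, elapsed, budgetSeconds)
	return elapsed, elapsed <= budgetSeconds
}

func main() {
	if _, ok := timePhase("create projects", 360, func() { time.Sleep(50 * time.Millisecond) }); !ok {
		fmt.Println("create phase exceeded its budget")
	}
}
```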
test case
|
openshift/openshift-tests-private
|
1f4afeee-a49e-4843-a449-4ba4ddad5fcd
|
Author:liqcui-Medium-22140-Create multiple projects and time various oc commands durations[Serial]
|
['"fmt"', '"sync"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/perfscale/ocperf.go
|
g.It("Author:liqcui-Medium-22140-Create multiple projects and time various oc commands durations[Serial]", func() {
if isSNO {
g.Skip("Skip Testing on SNO ...")
}
var (
metricName string
metricValueBefore int
metricValueAfter int
)
mo, err := exutil.NewPrometheusMonitor(oc)
o.Expect(err).NotTo(o.HaveOccurred())
masterNodeNames, err := exutil.GetClusterNodesBy(oc, "control-plane")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(masterNodeNames).NotTo(o.BeEmpty())
for i := 0; i < len(masterNodeNames); i++ {
metricString := fmt.Sprintf(`container_memory_rss{container="kube-apiserver",namespace="openshift-kube-apiserver",node="%s"}`, masterNodeNames[i])
tagQueryParams := exutil.MonitorInstantQueryParams{Query: metricString}
metric4MemRSS := mo.InstantQueryWithRetry(tagQueryParams, 15)
metricName, metricValueBefore = exutil.ExtractSpecifiedValueFromMetricData4MemRSS(oc, metric4MemRSS)
e2e.Logf("The value of %s is %d on [%s].", metricName, metricValueBefore, masterNodeNames[i])
}
//Prefer the [tests] image; it is mirrored by default in disconnected clusters.
//If the [tests] image is not available in some environments, fall back to the hello-openshift image.
//The tests imagestream usually ships with every OCP release and is mirrored by default in disconnected clusters.
ocPerfAppImageName = getImagestreamImageName(oc, "tests")
if len(ocPerfAppImageName) == 0 {
ocPerfAppImageName = "quay.io/openshifttest/hello-openshift:multiarch"
}
e2e.Logf("ocp perfscale test case ocp-22140 will use the image below for testing:\n[Image Name]:%s", ocPerfAppImageName)
if iaasPlatform == "ibmcloud" {
projectCount = 25
} else if iaasPlatform == "aws" {
projectCount = 30
} else if iaasPlatform == "azure" || iaasPlatform == "gcp" {
projectCount = 35
} else {
projectCount = 40
}
start := time.Now()
g.By("Try to create projects and deployments")
randStr := exutil.RandStrCustomize("perfscaleqeoclixyz", 8)
nsPattern := randStr + "-%d"
var wg sync.WaitGroup
for i := 0; i < projectCount; i++ {
namespace := fmt.Sprintf(nsPattern, i)
wg.Add(1)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", namespace, "--ignore-not-found").Execute()
go createNSUsingOCCLI(oc, namespace, &wg)
}
wg.Wait() // Wait for all goroutines to finish
checkIfNSIsInExpectedState(oc, projectCount, randStr)
//var wg sync.WaitGroup
for i := 0; i < projectCount; i++ {
namespace = fmt.Sprintf(nsPattern, i)
wg.Add(1)
go createDeploymentServiceUsingOCCLI(oc, namespace, ocpPerfAppService, ocpPerfAppDeployment, ocPerfAppImageName, &wg)
}
wg.Wait()
for i := 0; i < projectCount; i++ {
namespace = fmt.Sprintf(nsPattern, i)
checkIfDeploymentIsInExpectedState(oc, namespace, "ocp-perfapp")
}
createDuration := time.Since(start).Seconds()
e2e.Logf("Duration for creating %d projects and 1 deploymentConfig in each of those is %.2f seconds", projectCount, createDuration)
start = time.Now()
g.By("Try to get deployment, sa, and secrets")
for i := 0; i < projectCount; i++ {
namespace = fmt.Sprintf(nsPattern, i)
wg.Add(1)
go getResourceUsingOCCLI(oc, namespace, &wg)
}
wg.Wait()
getDuration := time.Since(start).Seconds()
e2e.Logf("Duration for getting deployment, sa, and secrets in each of those is %.2f seconds", getDuration)
start = time.Now()
g.By("Try to scale the dc replicas to 0")
for i := 0; i < projectCount; i++ {
namespace = fmt.Sprintf(nsPattern, i)
wg.Add(1)
go scaleDownDeploymentUsingOCCLI(oc, namespace, &wg)
}
wg.Wait()
for i := 0; i < projectCount; i++ {
namespace = fmt.Sprintf(nsPattern, i)
checkIfDeploymentIsInExpectedState(oc, namespace, "ocp-perfapp")
}
scaleDuration := time.Since(start).Seconds()
e2e.Logf("Duration for scaling the dc replicas to 0 in each of those is %.2f seconds", scaleDuration)
start = time.Now()
g.By("Try to delete project")
for i := 0; i < projectCount; i++ {
namespace = fmt.Sprintf(nsPattern, i)
wg.Add(1)
go deleteNSUsingOCCLI(oc, namespace, &wg)
}
wg.Wait()
checkIfNSIsInExpectedState(oc, 0, randStr)
deleteDuration := time.Since(start).Seconds()
for i := 0; i < len(masterNodeNames); i++ {
metricString := fmt.Sprintf(`container_memory_rss{container="kube-apiserver",namespace="openshift-kube-apiserver",node="%s"}`, masterNodeNames[i])
tagQueryParams := exutil.MonitorInstantQueryParams{Query: metricString}
metric4MemRSS := mo.InstantQueryWithRetry(tagQueryParams, 15)
metricName, metricValueAfter = exutil.ExtractSpecifiedValueFromMetricData4MemRSS(oc, metric4MemRSS)
e2e.Logf("The value of %s is %d on [%s].", metricName, metricValueAfter, masterNodeNames[i])
if metricValueAfter > metricValueBefore {
e2e.Logf("The value of %s increased from %d to %d on [%s].", metricName, metricValueBefore, metricValueAfter, masterNodeNames[i])
}
//Must stay lower than 3GB = 3*1024*1024*1024 = 3221225472 bytes; the limit allows for higher memory usage in 4.16
o.Expect(metricValueAfter).To(o.BeNumerically("<=", 3221225472))
}
// e2e.Logf("Duration for deleting %d projects and 1 deploymentConfig in each of those is %.2f seconds", projectCount, deleteDuration)
// all values in BeNumerically are "Expected" and "Threshold" numbers
// Expected was derived by running this program 5 times against 4.8.0-0.nightly-2021-10-20-155651 and taking the median
// Threshold is set lower than the expected value
e2e.Logf("createDuration is: %v Expected time is less than 360s.", createDuration)
e2e.Logf("getDuration is: %v Expected time is less than 120s.", getDuration)
e2e.Logf("scaleDuration is: %v Expected time is less than 120s.", scaleDuration)
e2e.Logf("deleteDuration is: %v Expected time is less than 300s.", deleteDuration)
o.Expect(createDuration).To(o.BeNumerically("<=", 360))
o.Expect(getDuration).To(o.BeNumerically("<=", 120))
o.Expect(scaleDuration).To(o.BeNumerically("<=", 120))
o.Expect(deleteDuration).To(o.BeNumerically("<=", 300))
})
| |||||
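The before/after memory check in this test case builds a PromQL instant query for kube-apiserver RSS on each control-plane node and compares the result with a 3 GiB ceiling. The sketch below reproduces the query construction and the threshold arithmetic from the code above; the node name is a placeholder.

```go
package main

import "fmt"

func main() {
	// Placeholder node name; the test iterates over the real control-plane nodes.
	node := "master-0.example.com"

	// Same instant-query string the test feeds to the Prometheus monitor helper.
	query := fmt.Sprintf(
		`container_memory_rss{container="kube-apiserver",namespace="openshift-kube-apiserver",node="%s"}`,
		node)
	fmt.Println(query)

	// The pass/fail ceiling is 3 GiB expressed in bytes.
	const threshold int64 = 3 * 1024 * 1024 * 1024
	fmt.Println("3 GiB in bytes:", threshold) // 3221225472
}
```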
file
|
openshift/openshift-tests-private
|
4efa1e20-a8af-4af2-a833-cb004282abaf
|
cpu-manager-util
|
import (
"strings"
"time"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
)
|
github.com/openshift/openshift-tests-private/test/extended/psap/cpu/cpu-manager-util.go
|
package cpu
import (
"strings"
"time"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
)
func getFirstDrainedMasterNode(oc *exutil.CLI) string {
var (
nodeName string
)
err := wait.Poll(10*time.Second, 180*time.Second, func() (bool, error) {
nodeHostNameStr, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", `-ojsonpath={.items[?(@.spec.unschedulable==true)].metadata.name}`).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
nodeHostName := strings.Trim(nodeHostNameStr, "'")
masterNodeNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-l", "node-role.kubernetes.io/master", "-oname").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
if len(nodeHostName) > 0 && strings.Contains(masterNodeNames, nodeHostName) {
nodeName = nodeHostName
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "No node with SchedulingDisabled status was found")
return nodeName
}
|
package cpu
| ||||
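getFirstDrainedMasterNode is built around wait.Poll from k8s.io/apimachinery: the condition closure runs on an interval until it returns (true, nil), returns an error, or the timeout expires. A minimal runnable sketch of that polling contract, with a counter standing in for the "an unschedulable master node exists" check:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	attempts := 0
	// Poll every 2 seconds, for at most 30 seconds. Returning (false, nil)
	// means "not yet, try again"; (true, nil) stops the poll successfully;
	// a non-nil error aborts it immediately.
	err := wait.Poll(2*time.Second, 30*time.Second, func() (bool, error) {
		attempts++
		// Stand-in condition; the real helper checks whether an unschedulable
		// master node shows up in `oc get nodes`.
		return attempts >= 3, nil
	})
	if err != nil {
		fmt.Println("condition never became true:", err)
		return
	}
	fmt.Printf("condition met after %d attempts\n", attempts)
}
```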
function
|
openshift/openshift-tests-private
|
6f3c122b-06f0-4ff5-be4b-f0abc31b2f0e
|
getFirstDrainedMasterNode
|
['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/cpu/cpu-manager-util.go
|
func getFirstDrainedMasterNode(oc *exutil.CLI) string {
var (
nodeName string
)
err := wait.Poll(10*time.Second, 180*time.Second, func() (bool, error) {
nodeHostNameStr, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", `-ojsonpath={.items[?(@.spec.unschedulable==true)].metadata.name}`).Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
nodeHostName := strings.Trim(nodeHostNameStr, "'")
masterNodeNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-l", "node-role.kubernetes.io/master", "-oname").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
if len(nodeHostName) > 0 && strings.Contains(masterNodeNames, nodeHostName) {
nodeName = nodeHostName
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "No node with SchedulingDisabled status was found")
return nodeName
}
|
cpu
| ||||
test
|
openshift/openshift-tests-private
|
0e741075-83e4-485c-9697-a0f9ec2ba1e2
|
cpu-manager
|
import (
"strconv"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/openshift/openshift-tests-private/test/extended/util/architecture"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/psap/cpu/cpu-manager.go
|
package cpu
import (
"strconv"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/openshift/openshift-tests-private/test/extended/util/architecture"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-tuning-node] PSAP should", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("cpumanager-test", exutil.KubeConfigPath())
cpuGuaranteedPodFile string
cpuKubeletconfigMasterFile string
iaasPlatform string
)
g.BeforeEach(func() {
// get IaaS platform
iaasPlatform = exutil.CheckPlatform(oc)
cpuGuaranteedPodFile = exutil.FixturePath("testdata", "psap", "cpu", "cpu-guaranteed-pod.yaml")
cpuKubeletconfigMasterFile = exutil.FixturePath("testdata", "psap", "cpu", "cpu-kubeletconfig-masters.yaml")
})
// author: [email protected]
g.It("NonHyperShiftHOST-Longduration-NonPreRelease-Author:liqcui-Medium-51417-Verify that static pods are not using CPUs reserved for workload with guaranteed CPUs [Disruptive] [Slow]", func() {
// currently the test is only supported on AWS, GCP, Azure, ibmcloud, alibabacloud, and ppc64le clusters
if iaasPlatform != "aws" && iaasPlatform != "gcp" && iaasPlatform != "azure" && iaasPlatform != "ibmcloud" && iaasPlatform != "alibabacloud" && architecture.ClusterArchitecture(oc).String() != "ppc64le" {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
//Identify the cpu number of master nodes
firstMasterNode, err := exutil.GetFirstMasterNode(oc)
o.Expect(err).NotTo(o.HaveOccurred())
masterNodeCPUNumStr, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", firstMasterNode, "-ojsonpath={.status.capacity.cpu}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
masterNodeCPU, err := strconv.Atoi(masterNodeCPUNumStr)
o.Expect(err).NotTo(o.HaveOccurred())
if masterNodeCPU <= 4 {
g.Skip("The master node only has %d CPUs, which is not enough; skipping test", masterNodeCPU)
}
//Test on compact 3-node clusters first; will move to normal clusters if there are not too many failures
is3CPNoWorker := exutil.Is3MasterNoDedicatedWorkerNode(oc)
if !is3CPNoWorker {
g.Skip("Only Test on compact 3 node")
}
oc.SetupProject()
cpuTestNS := oc.Namespace()
defer exutil.AssertIfMCPChangesAppliedByName(oc, "master", 1800)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("KubeletConfig", "masters").Output()
g.By("Create KubeletConfig masters to enable cpumanager and topology manager policy")
exutil.ApplyOperatorResourceByYaml(oc, "", cpuKubeletconfigMasterFile)
firstDrainedNode := getFirstDrainedMasterNode(oc)
e2e.Logf("The first drained master node is [ %v ]", firstDrainedNode)
o.Expect(firstDrainedNode).NotTo(o.BeEmpty())
g.By("Assert if MCP master is ready after enabling cpumanager and topology manager policy")
exutil.AssertIfMCPChangesAppliedByName(oc, "master", 1800)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "-n", cpuTestNS, "guaranteed-pod", "--ignore-not-found").Execute()
g.By("Create guaranteed pod in temp namespace")
exutil.CreateNsResourceFromTemplate(oc, cpuTestNS, "--ignore-unknown-parameters=true", "-f", cpuGuaranteedPodFile, "-p", "HOST_NAME="+firstDrainedNode)
g.By("Assert guaranteed pod is ready in temp namespace")
exutil.AssertPodToBeReady(oc, "guaranteed-pod", cpuTestNS)
g.By("Get POD Name of static pod etcd")
etcdPODName, err := exutil.GetPodName(oc, "openshift-etcd", "etcd=true", firstDrainedNode)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(etcdPODName).NotTo(o.BeEmpty())
e2e.Logf("The static POD name of etcd is [ %v ]", etcdPODName)
g.By("Get cpuset of static pod etcd")
etcdContainerID := exutil.GetContainerIDByPODName(oc, etcdPODName, "openshift-etcd")
o.Expect(etcdContainerID).NotTo(o.BeEmpty())
e2e.Logf("The container ID of static POD etcd is [ %v ]", etcdContainerID)
staticPODCPUSet := exutil.GetPODCPUSet(oc, "openshift-etcd", firstDrainedNode, etcdContainerID)
e2e.Logf("The static POD cpuset of etcd is [ %v ]", staticPODCPUSet)
o.Expect(staticPODCPUSet).NotTo(o.BeEmpty())
g.By("Assert cpuset of static pod etcd")
//The CPUs of the guaranteed POD are 1 and 8
//The default cpuset should therefore be 0,2-7,9-15
defaultCpuSet, guaranteedPODCPUs := exutil.CPUManagerStatebyNode(oc, "openshift-etcd", firstDrainedNode, "guaranteed-pod")
guaranteedPODCPU := strings.Split(guaranteedPODCPUs, " ")
e2e.Logf("The guaranteed POD pinned CPUs are [ %v ]", guaranteedPODCPU)
o.Expect(staticPODCPUSet).To(o.ContainSubstring(defaultCpuSet))
Len := len(guaranteedPODCPU)
for i := 0; i < Len; i++ {
if len(guaranteedPODCPU[i]) != 0 {
cpuNunInt, err := strconv.Atoi(guaranteedPODCPU[i])
o.Expect(err).NotTo(o.HaveOccurred())
expectedStr := strconv.Itoa(cpuNunInt-1) + "," + strconv.Itoa(cpuNunInt+1)
o.Expect(staticPODCPUSet).To(o.ContainSubstring(expectedStr))
}
}
})
})
|
package cpu
| ||||
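The final loop in this test encodes the CPU-pinning expectation spelled out in the comments: with the guaranteed pod pinned to CPUs 1 and 8 on a 16-CPU master, the default cpuset should read 0,2-7,9-15, so for each pinned CPU n the substring "n-1,n+1" must still appear in the static pod's cpuset. A small sketch of that check with hard-coded example values:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Example CPU manager state on a 16-CPU node: the guaranteed pod owns
	// CPUs 1 and 8, everything else stays in the shared/default cpuset.
	defaultCPUSet := "0,2-7,9-15"
	guaranteedCPUs := []string{"1", "8"}

	for _, c := range guaranteedCPUs {
		n, err := strconv.Atoi(c)
		if err != nil {
			continue
		}
		// For each pinned CPU n, the neighbours "n-1,n+1" (e.g. "0,2" and
		// "7,9") should still be present, showing the pinned CPU itself was
		// carved out of the shared set.
		expected := strconv.Itoa(n-1) + "," + strconv.Itoa(n+1)
		fmt.Printf("cpu %s pinned -> expect %q in %q: %v\n",
			c, expected, defaultCPUSet, strings.Contains(defaultCPUSet, expected))
	}
}
```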
test case
|
openshift/openshift-tests-private
|
15c91319-f3b9-42fc-8f27-231a9ef31841
|
NonHyperShiftHOST-Longduration-NonPreRelease-Author:liqcui-Medium-51417-Verify that static pods are not using CPUs reserved for workload with guaranteed CPUs [Disruptive] [Slow]
|
['"strconv"', '"strings"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/cpu/cpu-manager.go
|
g.It("NonHyperShiftHOST-Longduration-NonPreRelease-Author:liqcui-Medium-51417-Verify that static pods are not using CPUs reserved for workload with guaranteed CPUs [Disruptive] [Slow]", func() {
// currently the test is only supported on AWS, GCP, Azure, ibmcloud, alibabacloud, and ppc64le clusters
if iaasPlatform != "aws" && iaasPlatform != "gcp" && iaasPlatform != "azure" && iaasPlatform != "ibmcloud" && iaasPlatform != "alibabacloud" && architecture.ClusterArchitecture(oc).String() != "ppc64le" {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
//Identify the cpu number of master nodes
firstMasterNode, err := exutil.GetFirstMasterNode(oc)
o.Expect(err).NotTo(o.HaveOccurred())
masterNodeCPUNumStr, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", firstMasterNode, "-ojsonpath={.status.capacity.cpu}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
masterNodeCPU, err := strconv.Atoi(masterNodeCPUNumStr)
o.Expect(err).NotTo(o.HaveOccurred())
if masterNodeCPU <= 4 {
g.Skip("The master node only has %d CPUs, which is not enough; skipping test", masterNodeCPU)
}
//Test on compact 3-node clusters first; will move to normal clusters if there are not too many failures
is3CPNoWorker := exutil.Is3MasterNoDedicatedWorkerNode(oc)
if !is3CPNoWorker {
g.Skip("Only Test on compact 3 node")
}
oc.SetupProject()
cpuTestNS := oc.Namespace()
defer exutil.AssertIfMCPChangesAppliedByName(oc, "master", 1800)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("KubeletConfig", "masters").Output()
g.By("Create KubeletConfig masters to enable cpumanager and topology manager policy")
exutil.ApplyOperatorResourceByYaml(oc, "", cpuKubeletconfigMasterFile)
firstDrainedNode := getFirstDrainedMasterNode(oc)
e2e.Logf("The first drained master node is [ %v ]", firstDrainedNode)
o.Expect(firstDrainedNode).NotTo(o.BeEmpty())
g.By("Assert if MCP master is ready after enabling cpumanager and topology manager policy")
exutil.AssertIfMCPChangesAppliedByName(oc, "master", 1800)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "-n", cpuTestNS, "guaranteed-pod", "--ignore-not-found").Execute()
g.By("Create guaranteed pod in temp namespace")
exutil.CreateNsResourceFromTemplate(oc, cpuTestNS, "--ignore-unknown-parameters=true", "-f", cpuGuaranteedPodFile, "-p", "HOST_NAME="+firstDrainedNode)
g.By("Assert guaranteed pod is ready in temp namespace")
exutil.AssertPodToBeReady(oc, "guaranteed-pod", cpuTestNS)
g.By("Get POD Name of static pod etcd")
etcdPODName, err := exutil.GetPodName(oc, "openshift-etcd", "etcd=true", firstDrainedNode)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(etcdPODName).NotTo(o.BeEmpty())
e2e.Logf("The static POD name of etcd is [ %v ]", etcdPODName)
g.By("Get cpuset of static pod etcd")
etcdContainerID := exutil.GetContainerIDByPODName(oc, etcdPODName, "openshift-etcd")
o.Expect(etcdContainerID).NotTo(o.BeEmpty())
e2e.Logf("The container ID of static POD etcd is [ %v ]", etcdContainerID)
staticPODCPUSet := exutil.GetPODCPUSet(oc, "openshift-etcd", firstDrainedNode, etcdContainerID)
e2e.Logf("The static POD cpuset of etcd is [ %v ]", staticPODCPUSet)
o.Expect(staticPODCPUSet).NotTo(o.BeEmpty())
g.By("Assert cpuset of static pod etcd")
//The CPUs of the guaranteed POD are 1 and 8
//The default cpuset should therefore be 0,2-7,9-15
defaultCpuSet, guaranteedPODCPUs := exutil.CPUManagerStatebyNode(oc, "openshift-etcd", firstDrainedNode, "guaranteed-pod")
guaranteedPODCPU := strings.Split(guaranteedPODCPUs, " ")
e2e.Logf("The guaranteed POD pinned CPUs are [ %v ]", guaranteedPODCPU)
o.Expect(staticPODCPUSet).To(o.ContainSubstring(defaultCpuSet))
Len := len(guaranteedPODCPU)
for i := 0; i < Len; i++ {
if len(guaranteedPODCPU[i]) != 0 {
cpuNunInt, err := strconv.Atoi(guaranteedPODCPU[i])
o.Expect(err).NotTo(o.HaveOccurred())
expectedStr := strconv.Itoa(cpuNunInt-1) + "," + strconv.Itoa(cpuNunInt+1)
o.Expect(staticPODCPUSet).To(o.ContainSubstring(expectedStr))
}
}
})
| |||||
test
|
openshift/openshift-tests-private
|
6dfd8926-7c53-4db8-a56b-027a090d14ad
|
gpu
|
import (
"os"
"path/filepath"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/psap/gpu/gpu.go
|
package gpu
import (
"os"
"path/filepath"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-node] PSAP should", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("gpu-operator-test", exutil.KubeConfigPath())
gpuDir = exutil.FixturePath("testdata", "psap", "gpu")
iaasPlatform string
gpuMachinesetName = "openshift-psap-qe-gpu"
gpuClusterPolicyName = "gpu-cluster-policy"
ManualPickup = true
)
g.BeforeEach(func() {
// get IaaS platform
iaasPlatform = exutil.CheckPlatform(oc)
// Ensure NFD operator is installed
// Test requires NFD to be installed and a NodeFeatureDiscovery operand instance to be running
g.By("Deploy NFD Operator and create NFD operand instance on Openshift Container Platform")
isNodeLabeled := exutil.IsNodeLabeledByNFD(oc)
//If the node has been labeled, the NFD operator and instance are already deployed
if isNodeLabeled {
e2e.Logf("NFD installation and node label found! Continuing with test ...")
} else {
e2e.Logf("NFD is not deployed, deploying NFD operator and operand instance")
exutil.InstallNFD(oc, "openshift-nfd")
//Check if the NFD Operator installed in namespace openshift-nfd
exutil.WaitOprResourceReady(oc, "deployment", "nfd-controller-manager", "openshift-nfd", true, false)
//create NFD instance in openshift-nfd
exutil.CreateNFDInstance(oc, "openshift-nfd")
}
})
// author: [email protected]
g.It("Longduration-NonPreRelease-Author:wabouham-Medium-48452-Deploy NVIDIA GPU Operator with DTK without cluster-wide entitlement via yaml files[Slow]", func() {
// currently test is only supported on AWS
if iaasPlatform != "aws" {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet and is only supported on AWS - skipping test ...")
}
if ManualPickup {
g.Skip("This test case is executed manually on shared clusters ...")
}
// Code here to check for GPU instance and create a new machineset and substitute name and instance type to g4dn.xlarge
g.By("Check if we have an existing \"g4dn\" GPU enabled worker node, if not create a new machineset of instance type \"g4dn.xlarge\" on OCP")
checkGPU, err := checkIfWorkerNodesHaveGPUInstances(oc, "g4dn")
o.Expect(err).NotTo(o.HaveOccurred())
// For clean up GPU machineset in case of error during test case execution or after testcase completes execution
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args(exutil.MapiMachineset, gpuMachinesetName, "-n", "openshift-machine-api", "--ignore-not-found").Execute()
if !checkGPU {
e2e.Logf("No worker node detected with GPU instance, creating a g4dn.xlarge machineset ...")
createMachinesetbyInstanceType(oc, gpuMachinesetName, "g4dn.xlarge")
// Verify new node was created and is running
clusterinfra.WaitForMachinesRunning(oc, 1, gpuMachinesetName)
e2e.Logf("Newly created GPU machineset name: %v", gpuMachinesetName)
// Check that the NFD labels are created
ocDescribeNodes, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args("node").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(ocDescribeNodes).To(o.ContainSubstring("feature.node.kubernetes.io/pci-10de.present=true"))
} else {
e2e.Logf("At least one worker node detected with GPU instance, continuing with test ...")
}
g.By("Get the subscription channel and csv version names")
gpuOperatorDefaultChannelOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("packagemanifests/gpu-operator-certified", "-n", "openshift-marketplace", "-o", "jsonpath='{.status.defaultChannel}'").Output()
o.Expect(err).NotTo(o.HaveOccurred())
gpuOperatorDefaultChannelOutputSpace := strings.Trim(gpuOperatorDefaultChannelOutput, "'")
gpuOperatorDefaultChannel := strings.Trim(gpuOperatorDefaultChannelOutputSpace, " ")
e2e.Logf("GPU Operator default channel is: %v", gpuOperatorDefaultChannel)
// Get the GPU Operator CSV name
gpuOperatorCsvNameStr, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("packagemanifests", "gpu-operator-certified", "-n", "openshift-marketplace", "-ojsonpath={.status..currentCSV}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// oc get packagemanifest gpu-operator-certified -n openshift-marketplace -o jsonpath='{.status..currentCSV}' -- the output returns the currentCSV string twice:
// gpu-operator-certified.v1.10.1 gpu-operator-certified.v1.10.1
gpuOperatorCsvNameArray := strings.Split(gpuOperatorCsvNameStr, " ")
gpuOperatorCsvName := gpuOperatorCsvNameArray[0]
e2e.Logf("GPU Operator CSV name is: %v", gpuOperatorCsvName)
subTemplate := filepath.Join(gpuDir, "gpu-operator-subscription.yaml")
sub := subResource{
name: "gpu-operator-certified",
namespace: gpuOperatorNamespace,
channel: gpuOperatorDefaultChannel,
template: subTemplate,
startingCSV: gpuOperatorCsvName,
}
// Using defer to clean up after testcase execution or in the event of a testcase failure
// defer statements are executed in "last in, first out" order, with the last one executed first
// so the REVERSE of the normal order of resource deletion
defer exutil.CleanupOperatorResourceByYaml(oc, "", gpuOperatorNamespaceFile)
defer exutil.CleanupOperatorResourceByYaml(oc, "", gpuOperatorGroupFile)
// sub.delete(oc) will also delete the installed CSV
defer sub.delete(oc)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("clusterPolicy", gpuClusterPolicyName).Execute()
defer exutil.CleanupOperatorResourceByYaml(oc, gpuOperatorNamespace, gpuBurnWorkloadFile)
g.By("Get cluster version")
clusterVersion, _, err := exutil.GetClusterVersion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Cluster Version: %v", clusterVersion)
g.By("Run 'oc get node'")
ocGetNodes, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node").Output()
// after error checking, we log the output in the console
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Output: %v", ocGetNodes)
// check for labeled GPU worker nodes
g.By("Run 'oc describe node | grep feature | grep 10de'")
ocCommandWithPipeCmdsOutput := runOcCommandWithPipeCmd(oc, "describe", "node", " | grep feature | grep 10de")
e2e.Logf("Running 'oc describe node | grep feature | grep 10de' output: %v", ocCommandWithPipeCmdsOutput)
o.Expect(ocCommandWithPipeCmdsOutput).To(o.ContainSubstring("feature.node.kubernetes.io/pci-10de.present=true"))
g.By("Run 'oc get packagemanifests/gpu-operator-certified -n nvidia-gpu-operator'")
ocGetPackagemanifestOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("packagemanifests/gpu-operator-certified", "-n", "openshift-marketplace").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("oc get packagemanifests/gpu-operator-certified -n nvidia-gpu-operator Output: %v", ocGetPackagemanifestOutput)
// Check if GPU Operator ClusterPolicy is installed and ready
clusterPolicyReady := checkIfGPUOperatorClusterPolicyIsReady(oc, gpuOperatorNamespace)
e2e.Logf("clusterPolicyReady: %v", clusterPolicyReady)
if clusterPolicyReady {
e2e.Logf("clusterPolicyReady is true, cleaning up, undeploying GPU operator resources first, and re-deploying GPU operator")
ocDeleteClusterPolicyOutput, err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("clusterPolicy", gpuClusterPolicyName).Output()
// after error checking, we log the output in the console
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("ocGetPodsNvidiaGpuOperator output: \n%v", ocDeleteClusterPolicyOutput)
exutil.CleanupOperatorResourceByYaml(oc, "", gpuOperatorNamespaceFile)
} else {
e2e.Logf("clusterPolicyReady is false, need to deploy GPU operator")
}
// run oc apply -f <filename.yaml>. Create the nvidia-gpu-operator namespace
g.By("Create namespace nvidia-gpu-operator from gpu-operator-namespace.yaml")
exutil.ApplyOperatorResourceByYaml(oc, "", gpuOperatorNamespaceFile)
g.By("Create GPU Operator OperatorGroup from yaml file")
exutil.ApplyOperatorResourceByYaml(oc, "", gpuOperatorGroupFile)
g.By("Create GPU Operator Subscription from yaml file")
sub.createIfNotExist(oc)
// The only deployment is the gpu-operator
// gpu-operator 1/1 1 1 7m56s
g.By("Wait for gpu-operator deployment to be ready")
exutil.WaitOprResourceReady(oc, "deployment", "gpu-operator", gpuOperatorNamespace, true, false)
baseDir, err := os.MkdirTemp("/tmp/", "tmp_48452")
o.Expect(err).NotTo(o.HaveOccurred())
defer os.RemoveAll(baseDir)
extractedClusterPolicyFileName := filepath.Join(baseDir, "clusterPolicy-48452-after-jq.json")
createClusterPolicyJSONFromCSV(oc, gpuOperatorNamespace, gpuOperatorCsvName, extractedClusterPolicyFileName)
g.By("Create GPU Operator ClusterPolicy from extracted json file from csv")
exutil.ApplyOperatorResourceByYaml(oc, "", extractedClusterPolicyFileName)
g.By("Run 'oc get clusterPolicy'")
ocGetClusterPolicyOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterPolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("ocGetClusterPolicyOutput: \n%v", ocGetClusterPolicyOutput)
// Daemonsets are:
// ----------------
// gpu-feature-discovery 1 1 1 1 1 nvidia.com/gpu.deploy.gpu-feature-discovery=true 7m50s
// nvidia-container-toolkit-daemonset 1 1 1 1 1 nvidia.com/gpu.deploy.container-toolkit=true 7m51s
// nvidia-dcgm 1 1 1 1 1 nvidia.com/gpu.deploy.dcgm=true 7m51s
// nvidia-dcgm-exporter 1 1 1 1 1 nvidia.com/gpu.deploy.dcgm-exporter=true 7m50s
// nvidia-device-plugin-daemonset 1 1 1 1 1 nvidia.com/gpu.deploy.device-plugin=true 7m51s
// nvidia-driver-daemonset-410.84.202203290245-0 1 1 1 1 1 feature.node.kubernetes.io/system-os_release.OSTREE_VERSION=410.84.202203290245-0,nvidia.com/gpu.deploy.driver=true 7m51s
// nvidia-mig-manager 0 0 0 0 0 nvidia.com/gpu.deploy.mig-manager=true 7m50s
// nvidia-node-status-exporter 1 1 1 1 1 nvidia.com/gpu.deploy.node-status-exporter=true 7m51s
// nvidia-operator-validator
g.By("Wait for the daemonsets in the GPU operator namespace to be ready")
exutil.WaitOprResourceReady(oc, "daemonset", "nvidia-container-toolkit-daemonset", gpuOperatorNamespace, true, false)
exutil.WaitOprResourceReady(oc, "daemonset", "nvidia-dcgm", gpuOperatorNamespace, true, false)
exutil.WaitOprResourceReady(oc, "daemonset", "nvidia-dcgm-exporter", gpuOperatorNamespace, true, false)
exutil.WaitOprResourceReady(oc, "daemonset", "nvidia-device-plugin-daemonset", gpuOperatorNamespace, true, false)
exutil.WaitOprResourceReady(oc, "daemonset", "nvidia-node-status-exporter", gpuOperatorNamespace, true, false)
exutil.WaitOprResourceReady(oc, "daemonset", "nvidia-operator-validator", gpuOperatorNamespace, true, false)
ocGetPodsNvidiaGpuOperator, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", "nvidia-gpu-operator").Output()
// after error checking, we log the output in the console
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("ocGetPodsNvidiaGpuOperator output: \n%v", ocGetPodsNvidiaGpuOperator)
g.By("Run oc describe node | grep gpu command before running the gpu-burn workload")
// Check if the GPU device plugins are showing up in oc describe node and nvidia.com/gpu: 0
ocDescribeNodeGpuInstance := runOcCommandWithPipeCmd(oc, "describe", "node", " | grep gpu")
e2e.Logf("Before running gpu-burn workload, output of oc describe node grepping for gpu: \n%v", ocDescribeNodeGpuInstance)
o.Expect(ocDescribeNodeGpuInstance).To(o.ContainSubstring("nvidia"))
o.Expect(ocDescribeNodeGpuInstance).To(o.ContainSubstring("nvidia.com/gpu 0 0"))
// Deploy the gpu-burn workload gpu-burn-resource.yaml
g.By("Deploy the gpu-burn workload gpu-burn-resource.yaml file")
exutil.ApplyOperatorResourceByYaml(oc, gpuOperatorNamespace, gpuBurnWorkloadFile)
exutil.WaitOprResourceReady(oc, "daemonset", "gpu-burn-daemonset", gpuOperatorNamespace, true, false)
assertGPUBurnApp(oc, gpuOperatorNamespace, "gpu-burn-daemonset")
// Check if the GPU device plugins are showing up in oc describe node and nvidia.com/gpu: 1
g.By("Run oc describe node | grep gpu command after running the gpu-burn workload")
ocDescribeNodeGpuInstance1 := runOcCommandWithPipeCmd(oc, "describe", "node", " | grep gpu")
e2e.Logf("After running gpu-burn workload, output of oc describe node grepping for gpu: \n%v", ocDescribeNodeGpuInstance1)
o.Expect(ocDescribeNodeGpuInstance1).To(o.ContainSubstring("nvidia.com/gpu 1 1"))
})
})
|
package gpu
| ||||
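The jsonpath lookups in this test return values that need light cleanup before they can be reused: the default channel comes back wrapped in single quotes, and {.status..currentCSV} yields the CSV name twice. A small sketch of that string handling; both raw values are examples (the CSV name is taken from the comment above, the channel value is illustrative).

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Example raw outputs; the real values depend on the installed catalog.
	rawChannel := "'v1.10'"
	rawCSV := "gpu-operator-certified.v1.10.1 gpu-operator-certified.v1.10.1"

	// Strip the single quotes and surrounding spaces that jsonpath output can carry.
	channel := strings.TrimSpace(strings.Trim(rawChannel, "'"))
	// The CSV name appears twice separated by a space; keep only the first field.
	csvName := strings.Fields(rawCSV)[0]

	fmt.Println("channel:", channel)
	fmt.Println("csv:    ", csvName)
}
```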
test case
|
openshift/openshift-tests-private
|
3f8cb202-599c-4a11-9ab2-2554bc69757f
|
Longduration-NonPreRelease-Author:wabouham-Medium-48452-Deploy NVIDIA GPU Operator with DTK without cluster-wide entitlement via yaml files[Slow]
|
['"os"', '"path/filepath"', '"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/gpu/gpu.go
|
g.It("Longduration-NonPreRelease-Author:wabouham-Medium-48452-Deploy NVIDIA GPU Operator with DTK without cluster-wide entitlement via yaml files[Slow]", func() {
// currently test is only supported on AWS
if iaasPlatform != "aws" {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet and is only supported on AWS - skipping test ...")
}
if ManualPickup {
g.Skip("This test case is executed manually on shared clusters ...")
}
// Code here to check for GPU instance and create a new machineset and substitute name and instance type to g4dn.xlarge
g.By("Check if we have an existing \"g4dn\" GPU enabled worker node, if not create a new machineset of instance type \"g4dn.xlarge\" on OCP")
checkGPU, err := checkIfWorkerNodesHaveGPUInstances(oc, "g4dn")
o.Expect(err).NotTo(o.HaveOccurred())
// For clean up GPU machineset in case of error during test case execution or after testcase completes execution
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args(exutil.MapiMachineset, gpuMachinesetName, "-n", "openshift-machine-api", "--ignore-not-found").Execute()
if !checkGPU {
e2e.Logf("No worker node detected with GPU instance, creating a g4dn.xlarge machineset ...")
createMachinesetbyInstanceType(oc, gpuMachinesetName, "g4dn.xlarge")
// Verify new node was created and is running
clusterinfra.WaitForMachinesRunning(oc, 1, gpuMachinesetName)
e2e.Logf("Newly created GPU machineset name: %v", gpuMachinesetName)
// Check that the NFD labels are created
ocDescribeNodes, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args("node").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(ocDescribeNodes).To(o.ContainSubstring("feature.node.kubernetes.io/pci-10de.present=true"))
} else {
e2e.Logf("At least one worker node detected with GPU instance, continuing with test ...")
}
g.By("Get the subscription channel and csv version names")
gpuOperatorDefaultChannelOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("packagemanifests/gpu-operator-certified", "-n", "openshift-marketplace", "-o", "jsonpath='{.status.defaultChannel}'").Output()
o.Expect(err).NotTo(o.HaveOccurred())
gpuOperatorDefaultChannelOutputSpace := strings.Trim(gpuOperatorDefaultChannelOutput, "'")
gpuOperatorDefaultChannel := strings.Trim(gpuOperatorDefaultChannelOutputSpace, " ")
e2e.Logf("GPU Operator default channel is: %v", gpuOperatorDefaultChannel)
// Get the GPU Operator CSV name
gpuOperatorCsvNameStr, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("packagemanifests", "gpu-operator-certified", "-n", "openshift-marketplace", "-ojsonpath={.status..currentCSV}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// oc get packagemanifest gpu-operator-certified -n openshift-marketplace -o jsonpath='{.status..currentCSV}' -- the output returns the currentCSV string twice:
// gpu-operator-certified.v1.10.1 gpu-operator-certified.v1.10.1
gpuOperatorCsvNameArray := strings.Split(gpuOperatorCsvNameStr, " ")
gpuOperatorCsvName := gpuOperatorCsvNameArray[0]
e2e.Logf("GPU Operator CSV name is: %v", gpuOperatorCsvName)
subTemplate := filepath.Join(gpuDir, "gpu-operator-subscription.yaml")
sub := subResource{
name: "gpu-operator-certified",
namespace: gpuOperatorNamespace,
channel: gpuOperatorDefaultChannel,
template: subTemplate,
startingCSV: gpuOperatorCsvName,
}
// Using defer to clean up after testcase execution or in the event of a testcase failure
// defer statements are executed in "last in, first out" order, with the last one executed first
// so the REVERSE of the normal order of resource deletion
defer exutil.CleanupOperatorResourceByYaml(oc, "", gpuOperatorNamespaceFile)
defer exutil.CleanupOperatorResourceByYaml(oc, "", gpuOperatorGroupFile)
// sub.delete(oc) will also delete the installed CSV
defer sub.delete(oc)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("clusterPolicy", gpuClusterPolicyName).Execute()
defer exutil.CleanupOperatorResourceByYaml(oc, gpuOperatorNamespace, gpuBurnWorkloadFile)
g.By("Get cluster version")
clusterVersion, _, err := exutil.GetClusterVersion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Cluster Version: %v", clusterVersion)
g.By("Run 'oc get node'")
ocGetNodes, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node").Output()
// after error checking, we log the output in the console
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Output: %v", ocGetNodes)
// check for labeled GPU worker nodes
g.By("Run 'oc describe node | grep feature | grep 10de'")
ocCommandWithPipeCmdsOutput := runOcCommandWithPipeCmd(oc, "describe", "node", " | grep feature | grep 10de")
e2e.Logf("Running 'oc describe node | grep feature | grep 10de' output: %v", ocCommandWithPipeCmdsOutput)
o.Expect(ocCommandWithPipeCmdsOutput).To(o.ContainSubstring("feature.node.kubernetes.io/pci-10de.present=true"))
g.By("Run 'oc get packagemanifests/gpu-operator-certified -n nvidia-gpu-operator'")
ocGetPackagemanifestOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("packagemanifests/gpu-operator-certified", "-n", "openshift-marketplace").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("oc get packagemanifests/gpu-operator-certified -n nvidia-gpu-operator Output: %v", ocGetPackagemanifestOutput)
// Check if GPU Operator ClusterPolicy is installed and ready
clusterPolicyReady := checkIfGPUOperatorClusterPolicyIsReady(oc, gpuOperatorNamespace)
e2e.Logf("clusterPolicyReady: %v", clusterPolicyReady)
if clusterPolicyReady {
e2e.Logf("clusterPolicyReady is true, cleaning up, undeploying GPU operator resources first, and re-deploying GPU operator")
ocDeleteClusterPolicyOutput, err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("clusterPolicy", gpuClusterPolicyName).Output()
// after error checking, we log the output in the console
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("ocGetPodsNvidiaGpuOperator output: \n%v", ocDeleteClusterPolicyOutput)
exutil.CleanupOperatorResourceByYaml(oc, "", gpuOperatorNamespaceFile)
} else {
e2e.Logf("clusterPolicyReady is false, need to deploy GPU operator")
}
// run oc apply -f <filename.yaml>. Create the nvidia-gpu-operator namespace
g.By("Create namespace nvidia-gpu-operator from gpu-operator-namespace.yaml")
exutil.ApplyOperatorResourceByYaml(oc, "", gpuOperatorNamespaceFile)
g.By("Create GPU Operator OperatorGroup from yaml file")
exutil.ApplyOperatorResourceByYaml(oc, "", gpuOperatorGroupFile)
g.By("Create GPU Operator Subscription from yaml file")
sub.createIfNotExist(oc)
// The only deployment is the gpu-operator
// gpu-operator 1/1 1 1 7m56s
g.By("Wait for gpu-operator deployment to be ready")
exutil.WaitOprResourceReady(oc, "deployment", "gpu-operator", gpuOperatorNamespace, true, false)
baseDir, err := os.MkdirTemp("/tmp/", "tmp_48452")
o.Expect(err).NotTo(o.HaveOccurred())
defer os.RemoveAll(baseDir)
extractedClusterPolicyFileName := filepath.Join(baseDir, "clusterPolicy-48452-after-jq.json")
createClusterPolicyJSONFromCSV(oc, gpuOperatorNamespace, gpuOperatorCsvName, extractedClusterPolicyFileName)
g.By("Create GPU Operator ClusterPolicy from extracted json file from csv")
exutil.ApplyOperatorResourceByYaml(oc, "", extractedClusterPolicyFileName)
g.By("Run 'oc get clusterPolicy'")
ocGetClusterPolicyOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterPolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("ocGetClusterPolicyOutput: \n%v", ocGetClusterPolicyOutput)
// Daemonsets are:
// ----------------
// gpu-feature-discovery 1 1 1 1 1 nvidia.com/gpu.deploy.gpu-feature-discovery=true 7m50s
// nvidia-container-toolkit-daemonset 1 1 1 1 1 nvidia.com/gpu.deploy.container-toolkit=true 7m51s
// nvidia-dcgm 1 1 1 1 1 nvidia.com/gpu.deploy.dcgm=true 7m51s
// nvidia-dcgm-exporter 1 1 1 1 1 nvidia.com/gpu.deploy.dcgm-exporter=true 7m50s
// nvidia-device-plugin-daemonset 1 1 1 1 1 nvidia.com/gpu.deploy.device-plugin=true 7m51s
// nvidia-driver-daemonset-410.84.202203290245-0 1 1 1 1 1 feature.node.kubernetes.io/system-os_release.OSTREE_VERSION=410.84.202203290245-0,nvidia.com/gpu.deploy.driver=true 7m51s
// nvidia-mig-manager 0 0 0 0 0 nvidia.com/gpu.deploy.mig-manager=true 7m50s
// nvidia-node-status-exporter 1 1 1 1 1 nvidia.com/gpu.deploy.node-status-exporter=true 7m51s
// nvidia-operator-validator
g.By("Wait for the daemonsets in the GPU operator namespace to be ready")
exutil.WaitOprResourceReady(oc, "daemonset", "nvidia-container-toolkit-daemonset", gpuOperatorNamespace, true, false)
exutil.WaitOprResourceReady(oc, "daemonset", "nvidia-dcgm", gpuOperatorNamespace, true, false)
exutil.WaitOprResourceReady(oc, "daemonset", "nvidia-dcgm-exporter", gpuOperatorNamespace, true, false)
exutil.WaitOprResourceReady(oc, "daemonset", "nvidia-device-plugin-daemonset", gpuOperatorNamespace, true, false)
exutil.WaitOprResourceReady(oc, "daemonset", "nvidia-node-status-exporter", gpuOperatorNamespace, true, false)
exutil.WaitOprResourceReady(oc, "daemonset", "nvidia-operator-validator", gpuOperatorNamespace, true, false)
ocGetPodsNvidiaGpuOperator, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", "nvidia-gpu-operator").Output()
// after error checking, we log the output in the console
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("ocGetPodsNvidiaGpuOperator output: \n%v", ocGetPodsNvidiaGpuOperator)
g.By("Run oc describe node | grep gpu command before running the gpu-burn workload")
// Check if the GPU device plugins are showing up in oc describe node and nvidia.com/gpu: 0
ocDescribeNodeGpuInstance := runOcCommandWithPipeCmd(oc, "describe", "node", " | grep gpu")
e2e.Logf("Before running gpu-burn workload, output of oc describe node grepping for gpu: \n%v", ocDescribeNodeGpuInstance)
o.Expect(ocDescribeNodeGpuInstance).To(o.ContainSubstring("nvidia"))
o.Expect(ocDescribeNodeGpuInstance).To(o.ContainSubstring("nvidia.com/gpu 0 0"))
// Deploy the gpu-burn workload gpu-burn-resource.yaml
g.By("Deploy the gpu-burn workload gpu-burn-resource.yaml file")
exutil.ApplyOperatorResourceByYaml(oc, gpuOperatorNamespace, gpuBurnWorkloadFile)
exutil.WaitOprResourceReady(oc, "daemonset", "gpu-burn-daemonset", gpuOperatorNamespace, true, false)
assertGPUBurnApp(oc, gpuOperatorNamespace, "gpu-burn-daemonset")
// Check if the GPU device plugins are showing up in oc describe node and nvidia.com/gpu: 1
g.By("Run oc describe node | grep gpu command after running the gpu-burn workload")
ocDescribeNodeGpuInstance1 := runOcCommandWithPipeCmd(oc, "describe", "node", " | grep gpu")
e2e.Logf("After running gpu-burn workload, output of oc describe node grepping for gpu: \n%v", ocDescribeNodeGpuInstance1)
o.Expect(ocDescribeNodeGpuInstance1).To(o.ContainSubstring("nvidia.com/gpu 1 1"))
})
| |||||
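checkIfGPUOperatorClusterPolicyIsReady in the gpu_util record that follows strips the single quotes the jsonpath query leaves around the ClusterPolicy state before comparing it to "ready". A tiny sketch of that normalisation step:

```go
package main

import (
	"fmt"
	"strings"
)

// isReady mirrors the quote-stripping around the ClusterPolicy state:
// jsonpath output such as 'ready' is normalised before the comparison.
func isReady(raw string) bool {
	state := strings.ReplaceAll(strings.TrimSpace(raw), "'", "")
	return state == "ready"
}

func main() {
	fmt.Println(isReady("'ready'"))  // true
	fmt.Println(isReady("notReady")) // false
}
```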
test
|
openshift/openshift-tests-private
|
0bd68dd7-2b43-45d3-aecd-a36e61ee1c53
|
gpu_util
|
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"regexp"
"strings"
"time"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/psap/gpu/gpu_util.go
|
package gpu
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"regexp"
"strings"
"time"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var (
machinesetNamespace = "openshift-machine-api"
gpuOperatorNamespace = "nvidia-gpu-operator"
gpuOperatorNamespaceFile = exutil.FixturePath("testdata", "psap", "gpu", "gpu-operator-namespace.yaml")
gpuOperatorVersion = "v1.10.1"
gpuOperatorGroupFile = exutil.FixturePath("testdata", "psap", "gpu", "gpu-operator-group.yaml")
gpuOperatorSubscriptionFile = exutil.FixturePath("testdata", "psap", "gpu", "gpu-operator-subscription.yaml")
gpuBurnWorkloadFile = exutil.FixturePath("testdata", "psap", "gpu", "gpu-burn-resource.yaml")
)
// Run oc create -f <filename_yaml_file>.yaml; returns an error if creation fails
func runOcCreateYAML(oc *exutil.CLI, filename string) error {
return oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", filename).Execute()
}
func checkIfGPUOperatorClusterPolicyIsReady(oc *exutil.CLI, namespace string) bool {
// oc get clusterPolicy -o jsonpath='{.items[*].status.state}'
// returns: ready
// oc get clusterPolicy
// error: the server doesn't have a resource type "clusterPolicy"
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterPolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(output, "NotFound") || strings.Contains(output, "No resources found") || strings.Contains(output, "doesn't have a resource type") || err != nil {
e2e.Logf("No clusterPolicy was found on this cluster")
return false
}
clusterPolicyState, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterPolicy", "-o", "jsonpath='{.items[*].status.state}'").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// the clusterPolicy state in the output is wrapped in single quotes ('ready'); remove them before comparing
clusterPolicyWithoutSingleQuotes := strings.ReplaceAll(clusterPolicyState, "'", "")
e2e.Logf("clusterPolicyState: %v", clusterPolicyWithoutSingleQuotes)
return strings.Compare(clusterPolicyWithoutSingleQuotes, "ready") == 0
}
func runOcCommandWithPipeCmd(oc *exutil.CLI, ocCommand string, ocArgs string, pipeCmdString string) string {
// Run the base command with arguments and capture the output in a file
ocCommandOutputFile, err := oc.AsAdmin().WithoutNamespace().Run(ocCommand).Args(ocArgs).OutputToFile("ocCommandOutputFile.txt")
o.Expect(err).NotTo(o.HaveOccurred())
// Execute a basic bash command, piping the contents of the file into another command and again capturing the output
// Checking here if we have the "10de" label detected on GPU instance
rawOutput, err := exec.Command("bash", "-c", "cat "+ocCommandOutputFile+" "+pipeCmdString).Output()
o.Expect(err).NotTo(o.HaveOccurred())
// we need to format this output before logging it
stringifyOutput := strings.TrimSpace(string(rawOutput))
return stringifyOutput
}
func checkIfWorkerNodesHaveGPUInstances(oc *exutil.CLI, instanceTypePrefix string) (bool, error) {
// GPU enabled worker node instances will have the label from NFD:
// feature.node.kubernetes.io/pci-10de.present=true
// and also have labels:
// oc describe node -l feature.node.kubernetes.io/pci-10de.present=true | grep "instance-type=g4dn.xlarge"
// beta.kubernetes.io/instance-type=g4dn.xlarge
// node.kubernetes.io/instance-type=g4dn.xlarge
// instance-type=g4dn.<size>
// Run the base 'oc describe node` command and capture the output
ocDescribeNodes, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args("node", "-l feature.node.kubernetes.io/pci-10de.present=true").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// example label for the g4dn instance type prefix: "node.kubernetes.io/instance-type=g4dn"
gpuInstanceLabel := "node.kubernetes.io/instance-type=" + instanceTypePrefix
instanceTypeMatched := strings.Contains(ocDescribeNodes, gpuInstanceLabel)
if !instanceTypeMatched {
e2e.Logf("No worker nodes with GPU instances were detected")
return false, nil
}
e2e.Logf("At least one worker node contains a GPU with instanceType of prefix %v :", instanceTypePrefix)
return true, nil
}
func assertGPUBurnApp(oc *exutil.CLI, namespace string, gpuDsPodname string) {
// get the gpu-burn daemonset pod name
gpuPodsOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-oname", "-n", namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
//Filter the pod name based on the deployment/daemonset name
regexpoprname, _ := regexp.Compile(".*" + gpuDsPodname + ".*")
isMatch := regexpoprname.MatchString(gpuPodsOutput)
gpuPodname := regexpoprname.FindAllString(gpuPodsOutput, -1)
gpuBurnPodName := gpuPodname[0]
e2e.Logf("gpuPodname is : %v", gpuBurnPodName)
// Wait 10 sec between iterations until the condition function returns true or an error, or the poll times out after 12 mins
// The body under wait.Poll(...) is executed repeatedly until we time out or func() returns true or an error.
ocLogsGpuBurnOutput := ""
err1 := wait.Poll(10*time.Second, 12*time.Minute, func() (bool, error) {
// Get pod logs from the gpu-burn daemonset and look for "Gflop" and "errors: 0" in the pod log
var err error
ocLogsGpuBurnOutput, err = oc.AsAdmin().WithoutNamespace().Run("logs").Args(gpuBurnPodName, "-n", namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
isMatch2 := strings.Contains(ocLogsGpuBurnOutput, "Gflop")
isMatch3 := strings.Contains(ocLogsGpuBurnOutput, "errors: 0")
/* gpu-burn-daemonset pod log: example of last lines after execution completes:
96.0% proc'd: 61198 (3484 Gflop/s) errors: 0 temps: 75 C
Summary at: Thu May 12 02:00:27 UTC 2022
100.0% proc'd: 63679 (3518 Gflop/s) errors: 0 temps: 74 C
Killing processes.. done
Tested 1 GPUs:
GPU 0: OK
*/
isMatch4 := strings.Contains(ocLogsGpuBurnOutput, "Tested 1 GPUs")
isMatch5 := strings.Contains(ocLogsGpuBurnOutput, "GPU 0: OK")
isMatch6 := strings.Contains(ocLogsGpuBurnOutput, "100.0% proc'd:")
if isMatch && isMatch2 && isMatch3 && isMatch4 && isMatch5 && isMatch6 && err == nil {
e2e.Logf("gpu-burn workload execution completed successfully on the GPU instance")
// this stops the polling
return true, nil
} else if isMatch && isMatch2 && isMatch3 && err == nil {
e2e.Logf("gpu-burn workload still running on the GPU instance")
// return false to loop again
return false, nil
} else {
e2e.Logf("gpu-burn workload did NOT run successfully on the GPU instance")
return false, nil
}
})
// output the final pod log once
e2e.Logf("ocLogsGpuBurnOutput: \n%v", ocLogsGpuBurnOutput)
exutil.AssertWaitPollNoErr(err1, "gpu-burn workload ran abnormally")
}
type subResource struct {
name string
namespace string
channel string
startingCSV string
installedCSV string
template string
}
func (sub *subResource) createIfNotExist(oc *exutil.CLI) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", sub.name, "-n", sub.namespace).Output()
if strings.Contains(output, "NotFound") || strings.Contains(output, "No resources") || err != nil {
applyResource(oc, "--ignore-unknown-parameters=true", "-f", sub.template, "-p", "CHANNEL="+sub.channel, "CSV_VERSION="+sub.startingCSV, "GPU_NAMESPACE="+sub.namespace)
err = wait.Poll(5*time.Second, 240*time.Second, func() (bool, error) {
state, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", sub.name, "-n", sub.namespace, "-o=jsonpath={.status.state}").Output()
if err != nil {
e2e.Logf("output is %v, error is %v, and try next", state, err)
return false, nil
}
if strings.Compare(state, "AtLatestKnown") == 0 || strings.Compare(state, "UpgradeAvailable") == 0 {
return true, nil
}
e2e.Logf("sub %s state is %s, not AtLatestKnown or UpgradeAvailable", sub.name, state)
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("sub %s stat is not AtLatestKnown or UpgradeAvailable", sub.name))
installedCSV, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", sub.name, "-n", sub.namespace, "-o=jsonpath={.status.installedCSV}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(installedCSV).NotTo(o.BeEmpty())
sub.installedCSV = installedCSV
} else {
e2e.Logf("Subscription already exists in project: %s", sub.namespace)
}
}
func applyResource(oc *exutil.CLI, parameters ...string) error {
var configFile string
err := wait.Poll(3*time.Second, 15*time.Second, func() (bool, error) {
output, err := oc.AsAdmin().Run("process").Args(parameters...).OutputToFile("templateSubstituted.json")
if err != nil {
e2e.Logf("the err:%v, and try next round", err)
return false, nil
}
configFile = output
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to process %v", parameters))
e2e.Logf("the file of resource is %s", configFile)
return oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", configFile).Execute()
}
func (sub *subResource) delete(oc *exutil.CLI) {
oc.AsAdmin().WithoutNamespace().Run("delete").Args("sub", sub.name, "-n", sub.namespace).Output()
oc.AsAdmin().WithoutNamespace().Run("delete").Args("csv", sub.installedCSV, "-n", sub.namespace).Output()
}
func createClusterPolicyJSONFromCSV(oc *exutil.CLI, namespace string, csvName string, policyFileName string) {
// returns a string object wrapped in square brackets: "[ { clusterPolicy.json } ]"
ocCommandOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("csv", csvName, "-n", namespace, "-ojsonpath={.metadata.annotations.alm-examples}").OutputToFile("cluster-policy-output-jq-file.txt")
o.Expect(err).NotTo(o.HaveOccurred())
// Execute a basic bash command, piping the contents of the file into jq cmd
// to remove the square brackets around the clusterPolicy json body and retain proper formatting
rawJqOutput, err := exec.Command("bash", "-c", "cat "+ocCommandOutput+" | jq .[0]").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// we need to format this output before logging it
stringifyJqOutput := strings.TrimSpace(string(rawJqOutput))
e2e.Logf("CLusterPolicy output file after piping into jq: \n%v", stringifyJqOutput)
// rawJqOutput is of type []byte, a byte array
err = ioutil.WriteFile(policyFileName, rawJqOutput, 0644)
o.Expect(err).NotTo(o.HaveOccurred())
}
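// Hypothetical usage sketch (not part of the original file): create the ClusterPolicy from the
// JSON generated above and poll until it reports ready. The wrapper name, the output file name
// and the 15-minute timeout are illustrative assumptions.
func createAndWaitForClusterPolicy(oc *exutil.CLI, namespace string, csvName string) {
policyFileName := "gpu-cluster-policy.json"
createClusterPolicyJSONFromCSV(oc, namespace, csvName, policyFileName)
err := runOcCreateYAML(oc, policyFileName)
o.Expect(err).NotTo(o.HaveOccurred())
waitErr := wait.Poll(30*time.Second, 15*time.Minute, func() (bool, error) {
return checkIfGPUOperatorClusterPolicyIsReady(oc, namespace), nil
})
exutil.AssertWaitPollNoErr(waitErr, "clusterPolicy did not reach the ready state in time")
}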
func createMachinesetbyInstanceType(oc *exutil.CLI, machinesetName string, instanceType string) {
// Get existing machinesets in cluster
ocGetMachineset, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(exutil.MapiMachineset, "-n", "openshift-machine-api", "-oname").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Existing machinesets:\n%v", ocGetMachineset)
// Get name of first machineset in existing machineset list
firstMachinesetName := exutil.GetFirstLinuxMachineSets(oc)
e2e.Logf("Got %v from machineset list", firstMachinesetName)
machinesetYamlOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(exutil.MapiMachineset, firstMachinesetName, "-n", "openshift-machine-api", "-oyaml").Output()
o.Expect(err).NotTo(o.HaveOccurred())
//Create a machineset by specifying a machineset name
regMachineSet := regexp.MustCompile(firstMachinesetName)
newMachinesetYaml := regMachineSet.ReplaceAllString(machinesetYamlOutput, machinesetName)
//Change instanceType to the requested GPU instance type
regInstanceType := regexp.MustCompile(`instanceType:.*`)
newInstanceType := "instanceType: " + instanceType
newMachinesetYaml = regInstanceType.ReplaceAllString(newMachinesetYaml, newInstanceType)
//Make sure the replica count is 1
regReplicas := regexp.MustCompile(`replicas:.*`)
replicasNum := "replicas: 1"
newMachinesetYaml = regReplicas.ReplaceAllString(newMachinesetYaml, replicasNum)
machinesetNewB := []byte(newMachinesetYaml)
err = ioutil.WriteFile(machinesetName+"-new.yaml", machinesetNewB, 0644)
o.Expect(err).NotTo(o.HaveOccurred())
defer os.RemoveAll(machinesetName + "-new.yaml")
exutil.ApplyOperatorResourceByYaml(oc, "openshift-machine-api", machinesetName+"-new.yaml")
}
|
package gpu
| ||||
function
|
openshift/openshift-tests-private
|
c3811847-8eb3-4bfd-978c-780948ff7f9b
|
runOcCreateYAML
|
github.com/openshift/openshift-tests-private/test/extended/psap/gpu/gpu_util.go
|
func runOcCreateYAML(oc *exutil.CLI, filename string) error {
return oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", filename).Execute()
}
|
gpu
| |||||
function
|
openshift/openshift-tests-private
|
1629a88d-f5a4-463a-bbe0-ce762b8dcc44
|
checkIfGPUOperatorClusterPolicyIsReady
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/gpu/gpu_util.go
|
func checkIfGPUOperatorClusterPolicyIsReady(oc *exutil.CLI, namespace string) bool {
// oc get clusterPolicy -o jsonpath='{.items[*].status.state}'
// returns: ready
// oc get clusterPolicy
// error: the server doesn't have a resource type "clusterPolicy"
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterPolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(output, "NotFound") || strings.Contains(output, "No resources found") || strings.Contains(output, "doesn't have a resource type") || err != nil {
e2e.Logf("No clusterPolicy was found on this cluster")
return false
}
clusterPolicyState, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterPolicy", "-o", "jsonpath='{.items[*].status.state}'").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// clusterPolicy has single quotes around it: 'ready' from output, need to remove them
clusterPolicyWithoutSingleQuotes := strings.ReplaceAll(clusterPolicyState, "'", "")
e2e.Logf("clusterPolicyState: %v", clusterPolicyWithoutSingleQuotes)
return strings.Compare(clusterPolicyWithoutSingleQuotes, "ready") == 0
}
|
gpu
| ||||
function
|
openshift/openshift-tests-private
|
a4d5f387-bef8-45a3-a492-349a92df0df3
|
runOcCommandWithPipeCmd
|
['"os/exec"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/gpu/gpu_util.go
|
func runOcCommandWithPipeCmd(oc *exutil.CLI, ocCommand string, ocArgs string, pipeCmdString string) string {
// Run the base command with arguments and capture the output in a file
ocCommandOutputFile, err := oc.AsAdmin().WithoutNamespace().Run(ocCommand).Args(ocArgs).OutputToFile("ocCommandOutputFile.txt")
o.Expect(err).NotTo(o.HaveOccurred())
// Execute a basic bash command, piping the contents of the file into another command and again capturing the output
// Checking here if we have the "10de" label detected on GPU instance
rawOutput, err := exec.Command("bash", "-c", "cat "+ocCommandOutputFile+" "+pipeCmdString).Output()
o.Expect(err).NotTo(o.HaveOccurred())
// we need to format this output before logging it
stringifyOutput := strings.TrimSpace(string(rawOutput))
return stringifyOutput
}
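// Hypothetical usage sketch (not part of the original file): check whether any node carries the
// NVIDIA PCI vendor label by piping "oc describe node" output through grep. The wrapper name is
// an assumption; "|| true" keeps the bash pipeline from failing when grep finds no match.
func hasNvidiaPCILabel(oc *exutil.CLI) bool {
grepOutput := runOcCommandWithPipeCmd(oc, "describe", "node", "| grep 'feature.node.kubernetes.io/pci-10de.present=true' || true")
return strings.Contains(grepOutput, "pci-10de.present=true")
}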
|
gpu
| ||||
function
|
openshift/openshift-tests-private
|
7dd68003-089d-433c-8abb-aea7576da10a
|
checkIfWorkerNodesHaveGPUInstances
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/gpu/gpu_util.go
|
func checkIfWorkerNodesHaveGPUInstances(oc *exutil.CLI, instanceTypePrefix string) (bool, error) {
// GPU enabled worker node instances will have the label from NFD:
// feature.node.kubernetes.io/pci-10de.present=true
// and also have labels:
// oc describe node -l feature.node.kubernetes.io/pci-10de.present=true | grep "instance-type=g4dn.xlarge"
// beta.kubernetes.io/instance-type=g4dn.xlarge
// node.kubernetes.io/instance-type=g4dn.xlarge
// instance-type=g4dn.<size>
// Run the base "oc describe node" command and capture the output
ocDescribeNodes, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args("node", "-l feature.node.kubernetes.io/pci-10de.present=true").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// example label for the g4dn instance type prefix: node.kubernetes.io/instance-type=g4dn
gpuInstanceLabel := "node.kubernetes.io/instance-type=" + instanceTypePrefix
instanceTypeMatched := strings.Contains(ocDescribeNodes, gpuInstanceLabel)
if !instanceTypeMatched {
e2e.Logf("No worker nodes with GPU instances were detected")
return false, nil
}
e2e.Logf("At least one worker node contains a GPU with instanceType of prefix %v :", instanceTypePrefix)
return true, nil
}
|
gpu
| ||||
function
|
openshift/openshift-tests-private
|
cee52c00-bac4-414b-974e-ff89f6027848
|
assertGPUBurnApp
|
['"regexp"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/gpu/gpu_util.go
|
func assertGPUBurnApp(oc *exutil.CLI, namespace string, gpuDsPodname string) {
// get the gpu-burn daemonset pod name
gpuPodsOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-oname", "-n", namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
//Filter pod names based on the deployment/daemonset name
regexpoprname, _ := regexp.Compile(".*" + gpuDsPodname + ".*")
isMatch := regexpoprname.MatchString(gpuPodsOutput)
gpuPodname := regexpoprname.FindAllString(gpuPodsOutput, -1)
o.Expect(gpuPodname).NotTo(o.BeEmpty())
gpuBurnPodName := gpuPodname[0]
e2e.Logf("gpuPodname is : %v", gpuBurnPodName)
// Wait 10 sec in each iteration until the condition function returns true or errors, or the poll times out after 12 mins
// The body under wait.Poll(...) is executed over and over until we time out or func() returns true or an error.
ocLogsGpuBurnOutput := ""
err1 := wait.Poll(10*time.Second, 12*time.Minute, func() (bool, error) {
// GetPod logs from gpu-burn daemonset. Analyse later, look for "Gflop" and "errors: 0" in pod log
var err error
ocLogsGpuBurnOutput, err = oc.AsAdmin().WithoutNamespace().Run("logs").Args(gpuBurnPodName, "-n", namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
isMatch2 := strings.Contains(ocLogsGpuBurnOutput, "Gflop")
isMatch3 := strings.Contains(ocLogsGpuBurnOutput, "errors: 0")
/* gpu-burn-daemonset pod log: example of last lines after execution completes:
96.0% proc'd: 61198 (3484 Gflop/s) errors: 0 temps: 75 C
Summary at: Thu May 12 02:00:27 UTC 2022
100.0% proc'd: 63679 (3518 Gflop/s) errors: 0 temps: 74 C
Killing processes.. done
Tested 1 GPUs:
GPU 0: OK
*/
isMatch4 := strings.Contains(ocLogsGpuBurnOutput, "Tested 1 GPUs")
isMatch5 := strings.Contains(ocLogsGpuBurnOutput, "GPU 0: OK")
isMatch6 := strings.Contains(ocLogsGpuBurnOutput, "100.0% proc'd:")
if isMatch && isMatch2 && isMatch3 && isMatch4 && isMatch5 && isMatch6 && err == nil {
e2e.Logf("gpu-burn workload execution completed successfully on the GPU instance")
// this stops the polling
return true, nil
} else if isMatch && isMatch2 && isMatch3 && err == nil {
e2e.Logf("gpu-burn workload still running on the GPU instance")
// return false to loop again
return false, nil
} else {
e2e.Logf("gpu-burn workload did NOT run successfully on the GPU instance")
return false, nil
}
})
// output the final pod log once
e2e.Logf("ocLogsGpuBurnOutput: \n%v", ocLogsGpuBurnOutput)
exutil.AssertWaitPollNoErr(err1, "gpu-burn workload ran abnormally")
}
|
gpu
| ||||
function
|
openshift/openshift-tests-private
|
396ff14f-b274-44e0-b024-f3af574e078a
|
createIfNotExist
|
['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['subResource']
|
github.com/openshift/openshift-tests-private/test/extended/psap/gpu/gpu_util.go
|
func (sub *subResource) createIfNotExist(oc *exutil.CLI) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", sub.name, "-n", sub.namespace).Output()
if strings.Contains(output, "NotFound") || strings.Contains(output, "No resources") || err != nil {
applyResource(oc, "--ignore-unknown-parameters=true", "-f", sub.template, "-p", "CHANNEL="+sub.channel, "CSV_VERSION="+sub.startingCSV, "GPU_NAMESPACE="+sub.namespace)
err = wait.Poll(5*time.Second, 240*time.Second, func() (bool, error) {
state, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", sub.name, "-n", sub.namespace, "-o=jsonpath={.status.state}").Output()
if err != nil {
e2e.Logf("output is %v, error is %v, and try next", state, err)
return false, nil
}
if strings.Compare(state, "AtLatestKnown") == 0 || strings.Compare(state, "UpgradeAvailable") == 0 {
return true, nil
}
e2e.Logf("sub %s state is %s, not AtLatestKnown or UpgradeAvailable", sub.name, state)
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("sub %s stat is not AtLatestKnown or UpgradeAvailable", sub.name))
installedCSV, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", sub.name, "-n", sub.namespace, "-o=jsonpath={.status.installedCSV}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(installedCSV).NotTo(o.BeEmpty())
sub.installedCSV = installedCSV
} else {
e2e.Logf("Subscription already exists in project: %s", sub.namespace)
}
}
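// Hypothetical usage sketch (not part of the original file): install the GPU operator through a
// subscription template. Every field value below (name, namespace, channel, CSV version) is an
// illustrative assumption, not the suite's actual configuration.
func deployGPUOperatorExample(oc *exutil.CLI, subTemplate string) {
sub := subResource{
name: "gpu-operator-certified",
namespace: "nvidia-gpu-operator",
channel: "stable",
startingCSV: "gpu-operator-certified.v23.9.0",
template: subTemplate,
}
defer sub.delete(oc)
sub.createIfNotExist(oc)
e2e.Logf("Installed CSV is: %v", sub.installedCSV)
}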
|
gpu
| |||
function
|
openshift/openshift-tests-private
|
4413e3a3-cb1d-4fcc-8df5-f5741626904a
|
applyResource
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/gpu/gpu_util.go
|
func applyResource(oc *exutil.CLI, parameters ...string) error {
var configFile string
err := wait.Poll(3*time.Second, 15*time.Second, func() (bool, error) {
output, err := oc.AsAdmin().Run("process").Args(parameters...).OutputToFile("templateSubstituted.json")
if err != nil {
e2e.Logf("the err:%v, and try next round", err)
return false, nil
}
configFile = output
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to process %v", parameters))
e2e.Logf("the file of resource is %s", configFile)
return oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", configFile).Execute()
}
|
gpu
| ||||
function
|
openshift/openshift-tests-private
|
20935d7b-7c5d-4277-a412-1c0724c737ed
|
delete
|
['subResource']
|
github.com/openshift/openshift-tests-private/test/extended/psap/gpu/gpu_util.go
|
func (sub *subResource) delete(oc *exutil.CLI) {
oc.AsAdmin().WithoutNamespace().Run("delete").Args("sub", sub.name, "-n", sub.namespace).Output()
oc.AsAdmin().WithoutNamespace().Run("delete").Args("csv", sub.installedCSV, "-n", sub.namespace).Output()
}
|
gpu
| ||||
function
|
openshift/openshift-tests-private
|
91295a04-dbda-43f2-a75a-680eb7aea3b7
|
createClusterPolicyJSONFromCSV
|
['"io/ioutil"', '"os/exec"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/gpu/gpu_util.go
|
func createClusterPolicyJSONFromCSV(oc *exutil.CLI, namespace string, csvName string, policyFileName string) {
// returns a string object wrapped in square brackets: "[ { clusterPolicy.json } ]"
ocCommandOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("csv", csvName, "-n", namespace, "-ojsonpath={.metadata.annotations.alm-examples}").OutputToFile("cluster-policy-output-jq-file.txt")
o.Expect(err).NotTo(o.HaveOccurred())
// Execute a basic bash command, piping the contents of the file into jq cmd
// to remove the square brackets around the clusterPolicy json body and retain proper formatting
rawJqOutput, err := exec.Command("bash", "-c", "cat "+ocCommandOutput+" | jq .[0]").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// we need to format this output before logging it
stringifyJqOutput := strings.TrimSpace(string(rawJqOutput))
e2e.Logf("CLusterPolicy output file after piping into jq: \n%v", stringifyJqOutput)
// rawJqOutput is of type []byte, a byte array
err = ioutil.WriteFile(policyFileName, rawJqOutput, 0644)
o.Expect(err).NotTo(o.HaveOccurred())
}
|
gpu
| ||||
function
|
openshift/openshift-tests-private
|
c337f04d-84c2-4e90-98b3-8a8e7b39739c
|
createMachinesetbyInstanceType
|
['"io/ioutil"', '"os"', '"regexp"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/gpu/gpu_util.go
|
func createMachinesetbyInstanceType(oc *exutil.CLI, machinesetName string, instanceType string) {
// Get existing machinesets in cluster
ocGetMachineset, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(exutil.MapiMachineset, "-n", "openshift-machine-api", "-oname").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Existing machinesets:\n%v", ocGetMachineset)
// Get name of first machineset in existing machineset list
firstMachinesetName := exutil.GetFirstLinuxMachineSets(oc)
e2e.Logf("Got %v from machineset list", firstMachinesetName)
machinesetYamlOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(exutil.MapiMachineset, firstMachinesetName, "-n", "openshift-machine-api", "-oyaml").Output()
o.Expect(err).NotTo(o.HaveOccurred())
//Create a machineset by specifying a machineset name
regMachineSet := regexp.MustCompile(firstMachinesetName)
newMachinesetYaml := regMachineSet.ReplaceAllString(machinesetYamlOutput, machinesetName)
//Change instanceType to the requested GPU instance type
regInstanceType := regexp.MustCompile(`instanceType:.*`)
newInstanceType := "instanceType: " + instanceType
newMachinesetYaml = regInstanceType.ReplaceAllString(newMachinesetYaml, newInstanceType)
//Make sure the replica count is 1
regReplicas := regexp.MustCompile(`replicas:.*`)
replicasNum := "replicas: 1"
newMachinesetYaml = regReplicas.ReplaceAllString(newMachinesetYaml, replicasNum)
machinesetNewB := []byte(newMachinesetYaml)
err = ioutil.WriteFile(machinesetName+"-new.yaml", machinesetNewB, 0644)
o.Expect(err).NotTo(o.HaveOccurred())
defer os.RemoveAll(machinesetName + "-new.yaml")
exutil.ApplyOperatorResourceByYaml(oc, "openshift-machine-api", machinesetName+"-new.yaml")
}
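// Hypothetical usage sketch (not part of the original file): clone the first Linux machineset into
// a single-replica GPU machineset. The machineset name and instance type are illustrative
// assumptions, and the deferred cleanup relies on a plain "oc delete".
func createGPUMachinesetExample(oc *exutil.CLI) {
machinesetName := "openshift-psap-gpu-machineset"
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args(exutil.MapiMachineset, machinesetName, "-n", "openshift-machine-api", "--ignore-not-found").Execute()
createMachinesetbyInstanceType(oc, machinesetName, "g4dn.xlarge")
// a real test would additionally wait here for the new machine to reach the Running state
}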
|
gpu
| ||||
file
|
openshift/openshift-tests-private
|
c3200e84-b45a-481a-a817-c7d4ac32d450
|
hypernto_util
|
import (
"fmt"
"regexp"
"strings"
"time"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/psap/hypernto/hypernto_util.go
|
package hypernto
import (
"fmt"
"regexp"
"strings"
"time"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
// isHyperNTOPodInstalled returns true if the cluster-node-tuning-operator deployment is found in the given hosted-cluster control-plane namespace, and false otherwise
func isHyperNTOPodInstalled(oc *exutil.CLI, hostedClusterName string) bool {
e2e.Logf("Checking if pod is found in namespace %s...", hostedClusterName)
deploymentList, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment", "-n", hostedClusterName, "-oname").Output()
o.Expect(err).NotTo(o.HaveOccurred())
deployNamesReg := regexp.MustCompile("cluster-node-tuning-operator")
isNTOInstalled := deployNamesReg.MatchString(deploymentList)
if !isNTOInstalled {
e2e.Logf("No pod found in namespace %s :(", hostedClusterName)
return false
}
e2e.Logf("Pod found in namespace %s!", hostedClusterName)
return true
}
// getNodePoolNamebyHostedClusterName returns the nodepool name that belongs to the given hosted cluster
func getNodePoolNamebyHostedClusterName(oc *exutil.CLI, hostedClusterName, hostedClusterNS string) string {
nodePoolNameList, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodepool", "-n", hostedClusterNS, "-ojsonpath='{.items[*].metadata.name}'").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodePoolNameList).NotTo(o.BeEmpty())
//remove single quotes from nodePoolNameList, then replace spaces with \n
nodePoolNameStr := strings.Trim(nodePoolNameList, "'")
nodePoolNameLines := strings.Replace(nodePoolNameStr, " ", "\n", -1)
e2e.Logf("Hosted Cluster Name is: %s", hostedClusterName)
hostedClusterNameReg := regexp.MustCompile(".*" + hostedClusterName + ".*")
nodePoolName := hostedClusterNameReg.FindAllString(nodePoolNameLines, -1)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodePoolName).NotTo(o.BeEmpty())
e2e.Logf("Node Pool Name is: %s", nodePoolName[0])
return nodePoolName[0]
}
// getTuningConfigMapNameWithRetry returns the tuned configmap name(s) for the specified node pool
func getTuningConfigMapNameWithRetry(oc *exutil.CLI, namespace string, filter string) string {
var configmapName string
configmapName = ""
err := wait.Poll(15*time.Second, 180*time.Second, func() (bool, error) {
configMaps, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", namespace, "-oname").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(configMaps).NotTo(o.BeEmpty())
//filter the tuned configmap name
configMapsReg := regexp.MustCompile(".*" + filter + ".*")
isMatch := configMapsReg.MatchString(configMaps)
if isMatch {
tuningConfigMap := configMapsReg.FindAllString(configMaps, -1)
e2e.Logf("The list of tuned configmap is: \n%v", tuningConfigMap)
//Node Pool using MC will have two configmap
if len(tuningConfigMap) == 2 {
configmapName = tuningConfigMap[0] + " " + tuningConfigMap[1]
} else {
configmapName = tuningConfigMap[0]
}
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "The value sysctl mismatch, please check")
return configmapName
}
// getTunedSystemSetValueByParamNameInHostedCluster returns the current value of the given parameter on the node, read via oc debug
func getTunedSystemSetValueByParamNameInHostedCluster(oc *exutil.CLI, ntoNamespace, nodeName, oscommand, sysctlparm string) string {
var matchResult string
err := wait.Poll(15*time.Second, 180*time.Second, func() (bool, error) {
debugNodeStdout, err := oc.AsAdmin().AsGuestKubeconf().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+nodeName, "--", "chroot", "/host", oscommand, sysctlparm).Output()
o.Expect(debugNodeStdout).NotTo(o.BeEmpty())
if err == nil {
regexpstr, _ := regexp.Compile(sysctlparm + " =.*")
matchResult = regexpstr.FindString(debugNodeStdout)
e2e.Logf("The value of [ %v ] is [ %v ] on [ %v ]", sysctlparm, matchResult, nodeName)
return true, nil
}
e2e.Logf("The debug node threw BadRequest ContainerCreating or other error, try next")
return false, nil
})
exutil.AssertWaitPollNoErr(err, "Fail to execute debug node, keep threw error BadRequest ContainerCreating, please check")
return matchResult
}
// compareSpecifiedValueByNameOnLabelNodeWithRetryInHostedCluster asserts, with retry, that the sysctl parameter on the node equals the specified value
func compareSpecifiedValueByNameOnLabelNodeWithRetryInHostedCluster(oc *exutil.CLI, ntoNamespace, nodeName, oscommand, sysctlparm, specifiedvalue string) {
err := wait.Poll(15*time.Second, 180*time.Second, func() (bool, error) {
tunedSettings := getTunedSystemSetValueByParamNameInHostedCluster(oc, ntoNamespace, nodeName, oscommand, sysctlparm)
o.Expect(tunedSettings).NotTo(o.BeEmpty())
expectedSettings := sysctlparm + " = " + specifiedvalue
if strings.Contains(tunedSettings, expectedSettings) {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "The value sysctl mismatch, please check")
}
// assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster is used to check if the custom profile has been applied to a node
func assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc *exutil.CLI, namespace string, tunedNodeName string, expectedTunedName string) {
err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
currentTunedName, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("-n", namespace, "profiles.tuned.openshift.io", tunedNodeName, "-ojsonpath={.status.tunedProfile}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(currentTunedName).NotTo(o.BeEmpty())
e2e.Logf("The profile name on the node %v is: \n %v ", tunedNodeName, currentTunedName)
expectedAppliedStatus, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("-n", namespace, "profiles.tuned.openshift.io", tunedNodeName, `-ojsonpath='{.status.conditions[?(@.type=="Applied")].status}'`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(expectedAppliedStatus).NotTo(o.BeEmpty())
if currentTunedName != expectedTunedName && expectedAppliedStatus != "True" {
e2e.Logf("Profile '%s' has not yet been applied to %s - retrying...", expectedTunedName, tunedNodeName)
return false, nil
}
e2e.Logf("Profile '%s' has been applied to %s - continuing...", expectedTunedName, tunedNodeName)
tunedProfiles, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("-n", namespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(expectedAppliedStatus).NotTo(o.BeEmpty())
e2e.Logf("Current profiles on each node : \n %v ", tunedProfiles)
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Profile was not applied to %s within timeout limit (30 seconds)", tunedNodeName))
}
// assertNTOPodLogsLastLinesInHostedCluster
func assertNTOPodLogsLastLinesInHostedCluster(oc *exutil.CLI, namespace string, ntoPod string, lineN string, timeDurationSec int, filter string) {
var logLineStr []string
err := wait.Poll(15*time.Second, time.Duration(timeDurationSec)*time.Second, func() (bool, error) {
//Do not assert on err for SNO; the OCP API may be temporarily unreachable while the master node restarts or the certificate key is removed
ntoPodLogs, _ := oc.AsAdmin().AsGuestKubeconf().Run("logs").Args("-n", namespace, ntoPod, "--tail="+lineN).Output()
regNTOPodLogs, err := regexp.Compile(".*" + filter + ".*")
o.Expect(err).NotTo(o.HaveOccurred())
isMatch := regNTOPodLogs.MatchString(ntoPodLogs)
if isMatch {
logLineStr = regNTOPodLogs.FindAllString(ntoPodLogs, -1)
e2e.Logf("The logs of nto pod %v is: \n%v", ntoPod, logLineStr[0])
return true, nil
}
e2e.Logf("The keywords of nto pod isn't found, try next ...")
return false, nil
})
if len(logLineStr) > 0 {
e2e.Logf("The logs of nto pod %v is: \n%v", ntoPod, logLineStr[0])
}
exutil.AssertWaitPollNoErr(err, "The tuned pod's log doesn't contain the keywords, please check")
}
// getTunedRenderInHostedCluster returns the profile names of the rendered tuned in the given namespace
func getTunedRenderInHostedCluster(oc *exutil.CLI, namespace string) (string, error) {
return oc.AsAdmin().AsGuestKubeconf().Run("get").Args("-n", namespace, "tuned", "rendered", "-ojsonpath={.spec.profile[*].name}").Output()
}
// assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster is used to check if the custom profile has been applied to all nodes in a node pool
func assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc *exutil.CLI, namespace string, nodePoolName string, expectedTunedName string) {
var (
matchTunedProfile bool
matchAppliedStatus bool
matchNum int
currentAppliedStatus string
)
err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
nodeNames, err := exutil.GetAllNodesByNodePoolNameInHostedCluster(oc, nodePoolName)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The nodes in nodepool [%v] is:\n%v", nodePoolName, nodeNames)
currentProfiles, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("-n", namespace, "profiles.tuned.openshift.io").Output()
e2e.Logf("The currentProfiles in nodepool [%v] is:\n%v", nodePoolName, currentProfiles)
o.Expect(err).NotTo(o.HaveOccurred())
matchNum = 0
for i := 0; i < len(nodeNames); i++ {
currentTunedName, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("-n", namespace, "profiles.tuned.openshift.io", nodeNames[i], "-ojsonpath={.status.tunedProfile}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(currentTunedName).NotTo(o.BeEmpty())
matchTunedProfile = strings.Contains(currentTunedName, expectedTunedName)
currentAppliedStatus, err = oc.AsAdmin().AsGuestKubeconf().Run("get").Args("-n", namespace, "profiles.tuned.openshift.io", nodeNames[i], `-ojsonpath='{.status.conditions[?(@.type=="Applied")].status}'`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(currentAppliedStatus).NotTo(o.BeEmpty())
matchAppliedStatus = strings.Contains(currentAppliedStatus, "True")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(currentAppliedStatus).NotTo(o.BeEmpty())
if matchTunedProfile && matchAppliedStatus {
matchNum++
e2e.Logf("Profile '%s' matchs on %s - match times is:%v", expectedTunedName, nodeNames[i], matchNum)
}
}
if matchNum == len(nodeNames) {
tunedProfiles, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("-n", namespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profiles on each node : \n %v ", tunedProfiles)
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Profile was not applied to %s within timeout limit (30 seconds)", nodePoolName))
}
// compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster asserts, with retry, that every node in the node pool reports the expected sysctl value
func compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc *exutil.CLI, ntoNamespace, nodePoolName, oscommand, sysctlparm, specifiedvalue string) {
var (
isMatch bool
matchNum int
)
err := wait.Poll(15*time.Second, 180*time.Second, func() (bool, error) {
nodeNames, err := exutil.GetAllNodesByNodePoolNameInHostedCluster(oc, nodePoolName)
o.Expect(err).NotTo(o.HaveOccurred())
nodesNum := len(nodeNames)
matchNum = 0
//all worker nodes in the nodepool should match the tuned profile settings
for i := 0; i < nodesNum; i++ {
tunedSettings := getTunedSystemSetValueByParamNameInHostedCluster(oc, ntoNamespace, nodeNames[i], oscommand, sysctlparm)
expectedSettings := sysctlparm + " = " + specifiedvalue
if strings.Contains(tunedSettings, expectedSettings) {
matchNum++
isMatch = true
}
}
if isMatch && matchNum == nodesNum {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "The value sysctl mismatch, please check")
}
// assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster is used to assert that the parameter value on each node does not contain the specified value
func assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc *exutil.CLI, ntoNamespace, nodePoolName, oscommand, sysctlparm, expectedMisMatchValue string) {
nodeNames, err := exutil.GetAllNodesByNodePoolNameInHostedCluster(oc, nodePoolName)
o.Expect(err).NotTo(o.HaveOccurred())
nodesNum := len(nodeNames)
for i := 0; i < nodesNum; i++ {
stdOut := getTunedSystemSetValueByParamNameInHostedCluster(oc, ntoNamespace, nodeNames[i], oscommand, sysctlparm)
o.Expect(stdOut).NotTo(o.BeEmpty())
o.Expect(stdOut).NotTo(o.ContainSubstring(expectedMisMatchValue))
}
}
// assertIfMatchKenelBootOnNodePoolLevelInHostedCluster is used to check whether the kernel boot cmdline on each node matches the expected keywords
func assertIfMatchKenelBootOnNodePoolLevelInHostedCluster(oc *exutil.CLI, ntoNamespace, nodePoolName, expectedMatchValue string, isMatch bool) {
nodeNames, err := exutil.GetAllNodesByNodePoolNameInHostedCluster(oc, nodePoolName)
o.Expect(err).NotTo(o.HaveOccurred())
nodesNum := len(nodeNames)
for i := 0; i < nodesNum; i++ {
err := wait.Poll(15*time.Second, 180*time.Second, func() (bool, error) {
debugNodeStdout, err := oc.AsAdmin().AsGuestKubeconf().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+nodeNames[i], "--", "chroot", "/host", "cat", "/proc/cmdline").Output()
o.Expect(debugNodeStdout).NotTo(o.BeEmpty())
if err == nil {
e2e.Logf("The output of debug node is :\n%v)", debugNodeStdout)
if isMatch {
o.Expect(debugNodeStdout).To(o.ContainSubstring(expectedMatchValue))
} else {
o.Expect(debugNodeStdout).NotTo(o.ContainSubstring(expectedMatchValue))
}
return true, nil
}
e2e.Logf("The debug node threw BadRequest ContainerCreating or other error, try next")
return false, nil
})
exutil.AssertWaitPollNoErr(err, "Fail to execute debug node, keep threw error BadRequest ContainerCreating, please check")
}
}
// assertNTOPodLogsLastLinesInManagementCluster asserts that the last N lines of the NTO pod logs in the management cluster contain the given keywords
func assertNTOPodLogsLastLinesInManagementCluster(oc *exutil.CLI, namespace string, ntoPod string, lineN string, timeDurationSec int, filter string) {
var logLineStr []string
err := wait.Poll(15*time.Second, time.Duration(timeDurationSec)*time.Second, func() (bool, error) {
//Do not assert on err for SNO; the OCP API may be temporarily unreachable while the master node restarts or the certificate key is removed
ntoPodLogs, _ := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", namespace, ntoPod, "--tail="+lineN).Output()
regNTOPodLogs, err := regexp.Compile(".*" + filter + ".*")
o.Expect(err).NotTo(o.HaveOccurred())
isMatch := regNTOPodLogs.MatchString(ntoPodLogs)
if isMatch {
logLineStr = regNTOPodLogs.FindAllString(ntoPodLogs, -1)
e2e.Logf("The logs of nto pod %v is: \n%v", ntoPod, logLineStr[0])
return true, nil
}
e2e.Logf("The keywords of nto pod isn't found, try next ...")
return false, nil
})
e2e.Logf("The logs of nto pod %v is: \n%v", ntoPod, logLineStr[0])
exutil.AssertWaitPollNoErr(err, "The tuned pod's log doesn't contain the keywords, please check")
}
// AssertIfNodeIsReadyByNodeNameInHostedCluster checks if the worker node is ready
func AssertIfNodeIsReadyByNodeNameInHostedCluster(oc *exutil.CLI, tunedNodeName string, timeDurationSec int) {
o.Expect(timeDurationSec).Should(o.BeNumerically(">=", 10), "Disaster error: specify a timeDurationSec value greater than or equal to 10.")
err := wait.Poll(time.Duration(timeDurationSec/10)*time.Second, time.Duration(timeDurationSec)*time.Second, func() (bool, error) {
workerNodeStatus, err := oc.AsAdmin().AsGuestKubeconf().WithoutNamespace().Run("get").Args("nodes", tunedNodeName).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(workerNodeStatus).NotTo(o.BeEmpty())
if !strings.Contains(workerNodeStatus, "SchedulingDisabled") && strings.Contains(workerNodeStatus, "Ready") {
e2e.Logf("The node [%v] status is %v in hosted clusters)", tunedNodeName, workerNodeStatus)
return true, nil
}
e2e.Logf("worker node [%v] in hosted cluster checks failed, the worker node status should be Ready)", tunedNodeName)
return false, nil
})
exutil.AssertWaitPollNoErr(err, "Worker node checks were not successful within timeout limit")
}
// AssertIfTunedIsReadyByNameInHostedCluster checks if the specified tuned object has been created in the hosted cluster
func AssertIfTunedIsReadyByNameInHostedCluster(oc *exutil.CLI, tunedeName string, ntoNamespace string) {
// Assert that the tuned object is listed in the hosted cluster, with retry
o.Eventually(func() bool {
tunedStatus, err := oc.AsAdmin().AsGuestKubeconf().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "tuned").Output()
if err != nil || !strings.Contains(tunedStatus, tunedeName) {
e2e.Logf("The tuned %s isn't generated, check again, err is %v", tunedeName, err)
}
e2e.Logf("The list of tuned in namespace %v is: \n%v", ntoNamespace, tunedStatus)
return strings.Contains(tunedStatus, tunedeName)
}, 5*time.Second, time.Second).Should(o.BeTrue())
}
// function to check whether the given string is in the array
func implStringArrayContains(stringArray []string, name string) bool {
// iterate over the array and compare given string to each element
for _, value := range stringArray {
if value == name {
return true
}
}
return false
}
|
package hypernto
| ||||
function
|
openshift/openshift-tests-private
|
40504945-75da-43bb-a672-a6e2d2074abe
|
isHyperNTOPodInstalled
|
['"regexp"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/hypernto/hypernto_util.go
|
func isHyperNTOPodInstalled(oc *exutil.CLI, hostedClusterName string) bool {
e2e.Logf("Checking if pod is found in namespace %s...", hostedClusterName)
deploymentList, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment", "-n", hostedClusterName, "-oname").Output()
o.Expect(err).NotTo(o.HaveOccurred())
deployNamesReg := regexp.MustCompile("cluster-node-tuning-operator")
isNTOInstalled := deployNamesReg.MatchString(deploymentList)
if !isNTOInstalled {
e2e.Logf("No pod found in namespace %s :(", hostedClusterName)
return false
}
e2e.Logf("Pod found in namespace %s!", hostedClusterName)
return true
}
|
hypernto
| ||||
function
|
openshift/openshift-tests-private
|
9068d741-76bc-4779-8d32-752220095f82
|
getNodePoolNamebyHostedClusterName
|
['"regexp"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/hypernto/hypernto_util.go
|
func getNodePoolNamebyHostedClusterName(oc *exutil.CLI, hostedClusterName, hostedClusterNS string) string {
nodePoolNameList, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodepool", "-n", hostedClusterNS, "-ojsonpath='{.items[*].metadata.name}'").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodePoolNameList).NotTo(o.BeEmpty())
//remove single quotes from nodePoolNameList, then replace spaces with \n
nodePoolNameStr := strings.Trim(nodePoolNameList, "'")
nodePoolNameLines := strings.Replace(nodePoolNameStr, " ", "\n", -1)
e2e.Logf("Hosted Cluster Name is: %s", hostedClusterName)
hostedClusterNameReg := regexp.MustCompile(".*" + hostedClusterName + ".*")
nodePoolName := hostedClusterNameReg.FindAllString(nodePoolNameLines, -1)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodePoolName).NotTo(o.BeEmpty())
e2e.Logf("Node Pool Name is: %s", nodePoolName[0])
return nodePoolName[0]
}
|
hypernto
| ||||
function
|
openshift/openshift-tests-private
|
fe1d1c39-e72c-4896-a774-2759ada64a8b
|
getTuningConfigMapNameWithRetry
|
['"regexp"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/hypernto/hypernto_util.go
|
func getTuningConfigMapNameWithRetry(oc *exutil.CLI, namespace string, filter string) string {
var configmapName string
configmapName = ""
err := wait.Poll(15*time.Second, 180*time.Second, func() (bool, error) {
configMaps, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", namespace, "-oname").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(configMaps).NotTo(o.BeEmpty())
//filter the tuned configmap name
configMapsReg := regexp.MustCompile(".*" + filter + ".*")
isMatch := configMapsReg.MatchString(configMaps)
if isMatch {
tuningConfigMap := configMapsReg.FindAllString(configMaps, -1)
e2e.Logf("The list of tuned configmap is: \n%v", tuningConfigMap)
//A node pool using MC will have two configmaps
if len(tuningConfigMap) == 2 {
configmapName = tuningConfigMap[0] + " " + tuningConfigMap[1]
} else {
configmapName = tuningConfigMap[0]
}
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "The value sysctl mismatch, please check")
return configmapName
}
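// Hypothetical usage sketch (not part of the original file): the helper may return one or two
// space-separated configmap names (two when the node pool carries a MachineConfig), so split the
// result before using it. The wrapper name and the "tuned-" filter are illustrative assumptions.
func listTuningConfigMapNamesExample(oc *exutil.CLI, controlPlaneNS string) []string {
configmapNames := getTuningConfigMapNameWithRetry(oc, controlPlaneNS, "tuned-")
names := strings.Fields(configmapNames)
e2e.Logf("Tuning configmaps in %v: %v", controlPlaneNS, names)
return names
}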
|
hypernto
| ||||
function
|
openshift/openshift-tests-private
|
b91712d7-c885-4d9a-9d46-3cf8a26e0376
|
getTunedSystemSetValueByParamNameInHostedCluster
|
['"regexp"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/hypernto/hypernto_util.go
|
func getTunedSystemSetValueByParamNameInHostedCluster(oc *exutil.CLI, ntoNamespace, nodeName, oscommand, sysctlparm string) string {
var matchResult string
err := wait.Poll(15*time.Second, 180*time.Second, func() (bool, error) {
debugNodeStdout, err := oc.AsAdmin().AsGuestKubeconf().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+nodeName, "--", "chroot", "/host", oscommand, sysctlparm).Output()
o.Expect(debugNodeStdout).NotTo(o.BeEmpty())
if err == nil {
regexpstr, _ := regexp.Compile(sysctlparm + " =.*")
matchResult = regexpstr.FindString(debugNodeStdout)
e2e.Logf("The value of [ %v ] is [ %v ] on [ %v ]", sysctlparm, matchResult, nodeName)
return true, nil
}
e2e.Logf("The debug node threw BadRequest ContainerCreating or other error, try next")
return false, nil
})
exutil.AssertWaitPollNoErr(err, "Fail to execute debug node, keep threw error BadRequest ContainerCreating, please check")
return matchResult
}
|
hypernto
| ||||
function
|
openshift/openshift-tests-private
|
06f6ef3b-43e7-4f52-a532-122d82ebae48
|
compareSpecifiedValueByNameOnLabelNodeWithRetryInHostedCluster
|
['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/hypernto/hypernto_util.go
|
func compareSpecifiedValueByNameOnLabelNodeWithRetryInHostedCluster(oc *exutil.CLI, ntoNamespace, nodeName, oscommand, sysctlparm, specifiedvalue string) {
err := wait.Poll(15*time.Second, 180*time.Second, func() (bool, error) {
tunedSettings := getTunedSystemSetValueByParamNameInHostedCluster(oc, ntoNamespace, nodeName, oscommand, sysctlparm)
o.Expect(tunedSettings).NotTo(o.BeEmpty())
expectedSettings := sysctlparm + " = " + specifiedvalue
if strings.Contains(tunedSettings, expectedSettings) {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "The value sysctl mismatch, please check")
}
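// Hypothetical usage sketch (not part of the original file): verify that a tuned profile has set
// kernel.pid_max to an expected value on one hosted-cluster worker node. The wrapper name and the
// expected value are illustrative assumptions.
func verifyPidMaxExample(oc *exutil.CLI, ntoNamespace, workerNodeName string) {
compareSpecifiedValueByNameOnLabelNodeWithRetryInHostedCluster(oc, ntoNamespace, workerNodeName, "sysctl", "kernel.pid_max", "868686")
}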
|
hypernto
| ||||
function
|
openshift/openshift-tests-private
|
aeda1856-364e-4950-882d-9dc2c5587009
|
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/hypernto/hypernto_util.go
|
func assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc *exutil.CLI, namespace string, tunedNodeName string, expectedTunedName string) {
err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
currentTunedName, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("-n", namespace, "profiles.tuned.openshift.io", tunedNodeName, "-ojsonpath={.status.tunedProfile}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(currentTunedName).NotTo(o.BeEmpty())
e2e.Logf("The profile name on the node %v is: \n %v ", tunedNodeName, currentTunedName)
expectedAppliedStatus, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("-n", namespace, "profiles.tuned.openshift.io", tunedNodeName, `-ojsonpath='{.status.conditions[?(@.type=="Applied")].status}'`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(expectedAppliedStatus).NotTo(o.BeEmpty())
if currentTunedName != expectedTunedName && expectedAppliedStatus != "True" {
e2e.Logf("Profile '%s' has not yet been applied to %s - retrying...", expectedTunedName, tunedNodeName)
return false, nil
}
e2e.Logf("Profile '%s' has been applied to %s - continuing...", expectedTunedName, tunedNodeName)
tunedProfiles, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("-n", namespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(expectedAppliedStatus).NotTo(o.BeEmpty())
e2e.Logf("Current profiles on each node : \n %v ", tunedProfiles)
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Profile was not applied to %s within timeout limit (30 seconds)", tunedNodeName))
}
|
hypernto
| ||||
function
|
openshift/openshift-tests-private
|
e1917425-254c-49a0-ba47-7bf1e867db17
|
assertNTOPodLogsLastLinesInHostedCluster
|
['"regexp"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/hypernto/hypernto_util.go
|
func assertNTOPodLogsLastLinesInHostedCluster(oc *exutil.CLI, namespace string, ntoPod string, lineN string, timeDurationSec int, filter string) {
var logLineStr []string
err := wait.Poll(15*time.Second, time.Duration(timeDurationSec)*time.Second, func() (bool, error) {
//Do not assert on err for SNO; the OCP API may be temporarily unreachable while the master node restarts or the certificate key is removed
ntoPodLogs, _ := oc.AsAdmin().AsGuestKubeconf().Run("logs").Args("-n", namespace, ntoPod, "--tail="+lineN).Output()
regNTOPodLogs, err := regexp.Compile(".*" + filter + ".*")
o.Expect(err).NotTo(o.HaveOccurred())
isMatch := regNTOPodLogs.MatchString(ntoPodLogs)
if isMatch {
logLineStr = regNTOPodLogs.FindAllString(ntoPodLogs, -1)
e2e.Logf("The logs of nto pod %v is: \n%v", ntoPod, logLineStr[0])
return true, nil
}
e2e.Logf("The keywords of nto pod isn't found, try next ...")
return false, nil
})
if len(logLineStr) > 0 {
e2e.Logf("The logs of nto pod %v is: \n%v", ntoPod, logLineStr[0])
}
exutil.AssertWaitPollNoErr(err, "The tuned pod's log doesn't contain the keywords, please check")
}
|
hypernto
| ||||
function
|
openshift/openshift-tests-private
|
1a231a5c-60f5-47b1-ade9-45dbf55159f5
|
getTunedRenderInHostedCluster
|
github.com/openshift/openshift-tests-private/test/extended/psap/hypernto/hypernto_util.go
|
func getTunedRenderInHostedCluster(oc *exutil.CLI, namespace string) (string, error) {
return oc.AsAdmin().AsGuestKubeconf().Run("get").Args("-n", namespace, "tuned", "rendered", "-ojsonpath={.spec.profile[*].name}").Output()
}
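// Hypothetical usage sketch (not part of the original file): assert that a custom profile name
// shows up in the rendered tuned of the hosted cluster. The wrapper name and the profile name are
// illustrative assumptions.
func assertRenderedContainsProfileExample(oc *exutil.CLI, ntoNamespace string) {
renderedProfiles, err := getTunedRenderInHostedCluster(oc, ntoNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(renderedProfiles).To(o.ContainSubstring("hc-nodepool-pidmax"))
}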
|
hypernto
| |||||
function
|
openshift/openshift-tests-private
|
2e05f3a9-0780-4744-8b4b-4ea08c441f41
|
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster
|
['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/hypernto/hypernto_util.go
|
func assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc *exutil.CLI, namespace string, nodePoolName string, expectedTunedName string) {
var (
matchTunedProfile bool
matchAppliedStatus bool
matchNum int
currentAppliedStatus string
)
err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
nodeNames, err := exutil.GetAllNodesByNodePoolNameInHostedCluster(oc, nodePoolName)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The nodes in nodepool [%v] is:\n%v", nodePoolName, nodeNames)
currentProfiles, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("-n", namespace, "profiles.tuned.openshift.io").Output()
e2e.Logf("The currentProfiles in nodepool [%v] is:\n%v", nodePoolName, currentProfiles)
o.Expect(err).NotTo(o.HaveOccurred())
matchNum = 0
for i := 0; i < len(nodeNames); i++ {
currentTunedName, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("-n", namespace, "profiles.tuned.openshift.io", nodeNames[i], "-ojsonpath={.status.tunedProfile}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(currentTunedName).NotTo(o.BeEmpty())
matchTunedProfile = strings.Contains(currentTunedName, expectedTunedName)
currentAppliedStatus, err = oc.AsAdmin().AsGuestKubeconf().Run("get").Args("-n", namespace, "profiles.tuned.openshift.io", nodeNames[i], `-ojsonpath='{.status.conditions[?(@.type=="Applied")].status}'`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(currentAppliedStatus).NotTo(o.BeEmpty())
matchAppliedStatus = strings.Contains(currentAppliedStatus, "True")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(currentAppliedStatus).NotTo(o.BeEmpty())
if matchTunedProfile && matchAppliedStatus {
matchNum++
e2e.Logf("Profile '%s' matchs on %s - match times is:%v", expectedTunedName, nodeNames[i], matchNum)
}
}
if matchNum == len(nodeNames) {
tunedProfiles, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("-n", namespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profiles on each node : \n %v ", tunedProfiles)
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Profile was not applied to %s within timeout limit (30 seconds)", nodePoolName))
}
|
hypernto
| ||||
function
|
openshift/openshift-tests-private
|
48642242-192e-48a7-ad95-91964294fd89
|
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster
|
['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/hypernto/hypernto_util.go
|
func compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc *exutil.CLI, ntoNamespace, nodePoolName, oscommand, sysctlparm, specifiedvalue string) {
var (
isMatch bool
matchNum int
)
err := wait.Poll(15*time.Second, 180*time.Second, func() (bool, error) {
nodeNames, err := exutil.GetAllNodesByNodePoolNameInHostedCluster(oc, nodePoolName)
o.Expect(err).NotTo(o.HaveOccurred())
nodesNum := len(nodeNames)
matchNum = 0
//all worker nodes in the nodepool should match the tuned profile settings
for i := 0; i < nodesNum; i++ {
tunedSettings := getTunedSystemSetValueByParamNameInHostedCluster(oc, ntoNamespace, nodeNames[i], oscommand, sysctlparm)
expectedSettings := sysctlparm + " = " + specifiedvalue
if strings.Contains(tunedSettings, expectedSettings) {
matchNum++
isMatch = true
}
}
if isMatch && matchNum == nodesNum {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "The value sysctl mismatch, please check")
}
|
hypernto
| ||||
function
|
openshift/openshift-tests-private
|
c7387b62-e1e0-4001-9c43-2cee36b2178a
|
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster
|
github.com/openshift/openshift-tests-private/test/extended/psap/hypernto/hypernto_util.go
|
func assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc *exutil.CLI, ntoNamespace, nodePoolName, oscommand, sysctlparm, expectedMisMatchValue string) {
nodeNames, err := exutil.GetAllNodesByNodePoolNameInHostedCluster(oc, nodePoolName)
o.Expect(err).NotTo(o.HaveOccurred())
nodesNum := len(nodeNames)
for i := 0; i < nodesNum; i++ {
stdOut := getTunedSystemSetValueByParamNameInHostedCluster(oc, ntoNamespace, nodeNames[i], oscommand, sysctlparm)
o.Expect(stdOut).NotTo(o.BeEmpty())
o.Expect(stdOut).NotTo(o.ContainSubstring(expectedMisMatchValue))
}
}
|
hypernto
| |||||
function
|
openshift/openshift-tests-private
|
40a3828d-9061-4e6d-aeda-eddafac25de1
|
assertIfMatchKenelBootOnNodePoolLevelInHostedCluster
|
['"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/hypernto/hypernto_util.go
|
func assertIfMatchKenelBootOnNodePoolLevelInHostedCluster(oc *exutil.CLI, ntoNamespace, nodePoolName, expectedMatchValue string, isMatch bool) {
nodeNames, err := exutil.GetAllNodesByNodePoolNameInHostedCluster(oc, nodePoolName)
o.Expect(err).NotTo(o.HaveOccurred())
nodesNum := len(nodeNames)
for i := 0; i < nodesNum; i++ {
err := wait.Poll(15*time.Second, 180*time.Second, func() (bool, error) {
debugNodeStdout, err := oc.AsAdmin().AsGuestKubeconf().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+nodeNames[i], "--", "chroot", "/host", "cat", "/proc/cmdline").Output()
o.Expect(debugNodeStdout).NotTo(o.BeEmpty())
if err == nil {
e2e.Logf("The output of debug node is :\n%v)", debugNodeStdout)
if isMatch {
o.Expect(debugNodeStdout).To(o.ContainSubstring(expectedMatchValue))
} else {
o.Expect(debugNodeStdout).NotTo(o.ContainSubstring(expectedMatchValue))
}
return true, nil
}
e2e.Logf("The debug node threw BadRequest ContainerCreating or other error, try next")
return false, nil
})
exutil.AssertWaitPollNoErr(err, "Fail to execute debug node, keep threw error BadRequest ContainerCreating, please check")
}
}
|
hypernto
| ||||
function
|
openshift/openshift-tests-private
|
43c60ce4-fe86-4632-b130-004127fb3478
|
assertNTOPodLogsLastLinesInManagementCluster
|
['"regexp"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/hypernto/hypernto_util.go
|
func assertNTOPodLogsLastLinesInManagementCluster(oc *exutil.CLI, namespace string, ntoPod string, lineN string, timeDurationSec int, filter string) {
var logLineStr []string
err := wait.Poll(15*time.Second, time.Duration(timeDurationSec)*time.Second, func() (bool, error) {
//Do not assert on err for SNO; the OCP API may be temporarily unreachable while the master node restarts or the certificate key is removed
ntoPodLogs, _ := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", namespace, ntoPod, "--tail="+lineN).Output()
regNTOPodLogs, err := regexp.Compile(".*" + filter + ".*")
o.Expect(err).NotTo(o.HaveOccurred())
isMatch := regNTOPodLogs.MatchString(ntoPodLogs)
if isMatch {
logLineStr = regNTOPodLogs.FindAllString(ntoPodLogs, -1)
e2e.Logf("The logs of nto pod %v is: \n%v", ntoPod, logLineStr[0])
return true, nil
}
e2e.Logf("The keywords of nto pod isn't found, try next ...")
return false, nil
})
e2e.Logf("The logs of nto pod %v is: \n%v", ntoPod, logLineStr[0])
exutil.AssertWaitPollNoErr(err, "The tuned pod's log doesn't contain the keywords, please check")
}
|
hypernto
| ||||
function
|
openshift/openshift-tests-private
|
9f1dcd57-2b24-4d41-95ef-07decca7515e
|
AssertIfNodeIsReadyByNodeNameInHostedCluster
|
['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/hypernto/hypernto_util.go
|
func AssertIfNodeIsReadyByNodeNameInHostedCluster(oc *exutil.CLI, tunedNodeName string, timeDurationSec int) {
o.Expect(timeDurationSec).Should(o.BeNumerically(">=", 10), "Disaster error: specify a timeDurationSec value greater than or equal to 10.")
err := wait.Poll(time.Duration(timeDurationSec/10)*time.Second, time.Duration(timeDurationSec)*time.Second, func() (bool, error) {
workerNodeStatus, err := oc.AsAdmin().AsGuestKubeconf().WithoutNamespace().Run("get").Args("nodes", tunedNodeName).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(workerNodeStatus).NotTo(o.BeEmpty())
if !strings.Contains(workerNodeStatus, "SchedulingDisabled") && strings.Contains(workerNodeStatus, "Ready") {
e2e.Logf("The node [%v] status is %v in hosted clusters)", tunedNodeName, workerNodeStatus)
return true, nil
}
e2e.Logf("worker node [%v] in hosted cluster checks failed, the worker node status should be Ready)", tunedNodeName)
return false, nil
})
exutil.AssertWaitPollNoErr(err, "Worker node checks were not successful within timeout limit")
}
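// Hypothetical usage sketch (not part of the original file): wait for every node of a node pool to
// become Ready again after a tuningConfig change, giving each node up to 10 minutes. The wrapper
// name and the timeout are illustrative assumptions.
func waitForNodePoolReadyExample(oc *exutil.CLI, nodePoolName string) {
nodeNames, err := exutil.GetAllNodesByNodePoolNameInHostedCluster(oc, nodePoolName)
o.Expect(err).NotTo(o.HaveOccurred())
for _, nodeName := range nodeNames {
AssertIfNodeIsReadyByNodeNameInHostedCluster(oc, nodeName, 600)
}
}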
|
hypernto
| ||||
function
|
openshift/openshift-tests-private
|
116a6d27-cc06-4234-9468-b0fe82bf7e13
|
AssertIfTunedIsReadyByNameInHostedCluster
|
['"strings"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/hypernto/hypernto_util.go
|
func AssertIfTunedIsReadyByNameInHostedCluster(oc *exutil.CLI, tunedeName string, ntoNamespace string) {
// Assert that the tuned object is listed in the hosted cluster, with retry
o.Eventually(func() bool {
tunedStatus, err := oc.AsAdmin().AsGuestKubeconf().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "tuned").Output()
if err != nil || !strings.Contains(tunedStatus, tunedeName) {
e2e.Logf("The tuned %s isn't generated, check again, err is %v", tunedeName, err)
}
e2e.Logf("The list of tuned in namespace %v is: \n%v", ntoNamespace, tunedStatus)
return strings.Contains(tunedStatus, tunedeName)
}, 5*time.Second, time.Second).Should(o.BeTrue())
}
|
hypernto
| ||||
function
|
openshift/openshift-tests-private
|
c9359675-9c9b-482a-9733-674323801f0d
|
implStringArrayContains
|
github.com/openshift/openshift-tests-private/test/extended/psap/hypernto/hypernto_util.go
|
func implStringArrayContains(stringArray []string, name string) bool {
// iterate over the array and compare given string to each element
for _, value := range stringArray {
if value == name {
return true
}
}
return false
}
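// Hypothetical usage sketch (not part of the original file): gate a test case on the IaaS platform,
// mirroring how the test file uses this helper. The wrapper name is an assumption, and the ginkgo
// alias g is imported in the test file rather than in this utility file.
func skipOnUnsupportedPlatformExample(iaasPlatform string) {
supportPlatforms := []string{"aws", "azure", "aks"}
if !implStringArrayContains(supportPlatforms, iaasPlatform) {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
}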
|
hypernto
| |||||
test
|
openshift/openshift-tests-private
|
b5abf48a-3d90-437a-a97f-3028d7220d01
|
hypernto
|
import (
"context"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/psap/hypernto/hypernto.go
|
package hypernto
import (
"context"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-tuning-node] PSAP should", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLIForKubeOpenShift("hypernto-test")
ntoNamespace = "openshift-cluster-node-tuning-operator"
tunedWithDiffProfileNameAKSPidmax string
tunedWithInvalidProfileName string
tunedWithNodeLevelProfileName string
tunedWithNodeLevelProfileNameAKSVMRatio string
tunedWithNodeLevelProfileNameAKSVMRatio18 string
tunedWithNodeLevelProfileNameAKSPidmax string
tunedWithNodeLevelProfileNameAKSPidmax16 string
tunedWithNodeLevelProfileNameAKSPidmax1688 string
tunedWithKernelBootProfileName string
isNTO bool
isNTO2 bool
guestClusterName string
guestClusterNS string
guestClusterKube string
hostedClusterNS string
guestClusterName2 string
guestClusterNS2 string
guestClusterKube2 string
hostedClusterNS2 string
iaasPlatform string
firstNodePoolName string
secondNodePoolName string
ctx context.Context
isAKS bool
tunedWithSameProfileNameAKSPidmax string
)
g.BeforeEach(func() {
//First Hosted Cluster
guestClusterName, guestClusterKube, hostedClusterNS = exutil.ValidHypershiftAndGetGuestKubeConf(oc)
e2e.Logf("%s, %s, %s", guestClusterName, guestClusterKube, hostedClusterNS)
guestClusterNS = hostedClusterNS + "-" + guestClusterName
e2e.Logf("HostedClusterControlPlaneNS: %v", guestClusterNS)
// ensure NTO operator is installed
isNTO = isHyperNTOPodInstalled(oc, guestClusterNS)
oc.SetGuestKubeconf(guestClusterKube)
tunedWithSameProfileNameAKSPidmax = exutil.FixturePath("testdata", "psap", "hypernto", "tuned-with-sameprofilename-aks-pidmax.yaml")
tunedWithDiffProfileNameAKSPidmax = exutil.FixturePath("testdata", "psap", "hypernto", "tuned-with-diffprofilename-aks-pidmax.yaml")
tunedWithInvalidProfileName = exutil.FixturePath("testdata", "psap", "hypernto", "nto-basic-tuning-sysctl-invalid.yaml")
tunedWithNodeLevelProfileName = exutil.FixturePath("testdata", "psap", "hypernto", "nto-basic-tuning-sysctl-nodelevel.yaml")
tunedWithNodeLevelProfileNameAKSVMRatio = exutil.FixturePath("testdata", "psap", "hypernto", "nto-basic-tuning-sysctl-nodelevel-aks-vmdratio.yaml")
tunedWithNodeLevelProfileNameAKSVMRatio18 = exutil.FixturePath("testdata", "psap", "hypernto", "nto-basic-tuning-sysctl-nodelevel-aks-vmdratio-18.yaml")
tunedWithNodeLevelProfileNameAKSPidmax = exutil.FixturePath("testdata", "psap", "hypernto", "nto-basic-tuning-sysctl-nodelevel-aks-pidmax.yaml")
tunedWithNodeLevelProfileNameAKSPidmax16 = exutil.FixturePath("testdata", "psap", "hypernto", "nto-basic-tuning-sysctl-nodelevel-aks-pidmax-16.yaml")
tunedWithNodeLevelProfileNameAKSPidmax1688 = exutil.FixturePath("testdata", "psap", "hypernto", "nto-basic-tuning-sysctl-nodelevel-aks-pidmax-16-88.yaml")
tunedWithKernelBootProfileName = exutil.FixturePath("testdata", "psap", "hypernto", "nto-basic-tuning-kernel-boot.yaml")
//get IaaS platform
ctx = context.Background()
if isAKS, _ = exutil.IsAKSCluster(ctx, oc); isAKS {
iaasPlatform = "aks"
} else {
iaasPlatform = exutil.CheckPlatform(oc)
}
e2e.Logf("Cloud provider is: %v", iaasPlatform)
})
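// For orientation, the tuningConfig flow exercised by the cases below follows the HyperShift node
// tuning pattern: a ConfigMap in the hosted cluster namespace wraps a Tuned manifest, and the
// nodepool references that ConfigMap by name. A minimal sketch of the assumed shape (the real
// manifests live in the fixture YAML files loaded above; the data key name here is an assumption):
//
//   apiVersion: v1
//   kind: ConfigMap
//   metadata:
//     name: hc-nodepool-pidmax
//     namespace: <hostedClusterNS>
//   data:
//     tuning: |
//       apiVersion: tuned.openshift.io/v1
//       kind: Tuned
//       ...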
g.It("HyperShiftMGMT-Author:liqcui-Medium-53875-NTO Support profile that have the same name with tuned on hypershift [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
supportPlatforms := []string{"aws", "azure", "aks"}
if !implStringArrayContains(supportPlatforms, iaasPlatform) {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
//Delete configmap in clusters namespace
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-pidmax", "-n", hostedClusterNS, "--ignore-not-found").Execute()
//Create configmap, it will create custom tuned profile based on this configmap
exutil.By("Create configmap hc-nodepool-pidmax in management cluster")
exutil.ApplyOperatorResourceByYaml(oc, hostedClusterNS, tunedWithSameProfileNameAKSPidmax)
configmapsInMgmtClusters, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", hostedClusterNS).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(configmapsInMgmtClusters).NotTo(o.BeEmpty())
o.Expect(configmapsInMgmtClusters).To(o.ContainSubstring("hc-nodepool-pidmax"))
//Apply tuned profile to hosted clusters
exutil.By("Apply tunedCconfig hc-nodepool-pidmax in hosted cluster nodepool")
nodePoolName := getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(nodePoolName).NotTo(o.BeEmpty())
exutil.By("Pick one worker node in hosted cluster, this worker node will be labeled with hc-nodepool-pidmax=")
workerNodeName, err := exutil.GetFirstLinuxWorkerNodeInHostedCluster(oc)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(workerNodeName).NotTo(o.BeEmpty())
e2e.Logf("Worker Node: %v", workerNodeName)
//Delete configmap in hosted cluster namespace and disable tuningConfig
defer assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeName, "openshift-node")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+nodePoolName, "-n", guestClusterNS, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", nodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
//Enable tuned in hosted clusters
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", nodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[{\"name\": \"hc-nodepool-pidmax\"}]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check if the configmap hc-nodepool-pidmax created in hosted cluster nodepool")
configMaps := getTuningConfigMapNameWithRetry(oc, guestClusterNS, nodePoolName)
o.Expect(configMaps).To(o.ContainSubstring("tuned-" + nodePoolName))
exutil.By("Check if the tuned hc-nodepool-pidmax is created in hosted cluster nodepool")
tunedNameList, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("tuned", "-n", ntoNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNameList).NotTo(o.BeEmpty())
e2e.Logf("The list of tuned tunedNameList is: \n%v", tunedNameList)
o.Expect(tunedNameList).To(o.ContainSubstring("hc-nodepool-pidmax"))
exutil.By("Get the tuned pod name that running on labeled node with hc-nodepool-pidmax=")
tunedPodName, err := exutil.GetPodNameInHostedCluster(oc, ntoNamespace, "", workerNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedPodName).NotTo(o.BeEmpty())
e2e.Logf("Tuned Pod: %v", tunedPodName)
exutil.By("Label the worker nodes with hc-nodepool-pidmax=")
defer oc.AsAdmin().AsGuestKubeconf().Run("label").Args("node", workerNodeName, "hc-nodepool-pidmax-").Execute()
err = oc.AsAdmin().AsGuestKubeconf().Run("label").Args("node", workerNodeName, "hc-nodepool-pidmax=").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check if the tuned profile applied to labeled worker nodes with hc-nodepool-pidmax=")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeName, "hc-nodepool-pidmax")
exutil.By("Assert recommended profile (hc-nodepool-pidmax) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodName, "12", 300, `recommended profile \(hc-nodepool-pidmax\) matches current configuration|static tuning from profile 'hc-nodepool-pidmax' applied`)
exutil.By("Check if the setting of sysctl kernel.pid_max applied to labeled worker nodes, expected value is 868686")
compareSpecifiedValueByNameOnLabelNodeWithRetryInHostedCluster(oc, ntoNamespace, workerNodeName, "sysctl", "kernel.pid_max", "868686")
exutil.By("Remove the custom tuned profile from node pool in hosted cluster ...")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", nodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
//Remove custom tuned profile to check if kernel.pid_max rollback to origin value
exutil.By("Remove configmap from management cluster")
oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-pidmax", "-n", hostedClusterNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+nodePoolName, "-n", guestClusterNS, "--ignore-not-found").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Assert recommended profile (openshift-node) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodName, "12", 300, `recommended profile \(openshift-node\) matches current configuration|static tuning from profile 'openshift-node' applied`)
exutil.By("Check if the custom tuned profile removed from labeled worker nodes, default openshift-node applied to worker node")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeName, "openshift-node")
pidMaxValue := getTunedSystemSetValueByParamNameInHostedCluster(oc, ntoNamespace, workerNodeName, "sysctl", "kernel.pid_max")
o.Expect(pidMaxValue).NotTo(o.BeEmpty())
o.Expect(pidMaxValue).NotTo(o.ContainSubstring("868686"))
})
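// For reference, the core enable/verify/disable flow this case drives maps to roughly the following
// management-cluster and guest-cluster commands (placeholders come from the test variables above):
//   oc patch nodepool <nodePoolName> -n <hostedClusterNS> --type merge -p '{"spec":{"tuningConfig":[{"name":"hc-nodepool-pidmax"}]}}'
//   oc --kubeconfig <guestKubeconf> label node <workerNodeName> hc-nodepool-pidmax=
//   oc patch nodepool <nodePoolName> -n <hostedClusterNS> --type merge -p '{"spec":{"tuningConfig":[]}}'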
g.It("HyperShiftMGMT-Author:liqcui-Medium-53876-NTO Operand logs errors when applying profile with invalid settings in HyperShift. [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
supportPlatforms := []string{"aws", "azure", "aks"}
if !implStringArrayContains(supportPlatforms, iaasPlatform) {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
//Delete configmap in hostedClusterNS namespace
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-invalid", "-n", hostedClusterNS, "--ignore-not-found").Execute()
//Create configmap, it will create custom tuned profile based on this configmap
exutil.By("Create configmap hc-nodepool-invalid in management cluster")
exutil.ApplyOperatorResourceByYaml(oc, hostedClusterNS, tunedWithInvalidProfileName)
configmapsInMgmtClusters, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", hostedClusterNS).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(configmapsInMgmtClusters).NotTo(o.BeEmpty())
o.Expect(configmapsInMgmtClusters).To(o.ContainSubstring("hc-nodepool-invalid"))
//Apply tuned profile to hosted clusters
exutil.By("Apply tunedCconfig hc-nodepool-invalid in hosted cluster nodepool")
nodePoolName := getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(nodePoolName).NotTo(o.BeEmpty())
exutil.By("Pick one worker node in hosted cluster, this worker node will be labeled with hc-nodepool-invalid=")
workerNodeName, err := exutil.GetFirstLinuxWorkerNodeInHostedCluster(oc)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(workerNodeName).NotTo(o.BeEmpty())
e2e.Logf("Worker Node: %v", workerNodeName)
//Delete configmap in hosted cluster namespace and disable tuningConfig
defer assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeName, "openshift-node")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+nodePoolName, "-n", guestClusterNS, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", nodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
//Enable tuned in hosted clusters
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", nodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[{\"name\": \"hc-nodepool-invalid\"}]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check if the configmap hc-nodepool-invalid created in hosted cluster nodepool")
configMaps := getTuningConfigMapNameWithRetry(oc, guestClusterNS, nodePoolName)
o.Expect(configMaps).To(o.ContainSubstring("tuned-" + nodePoolName))
exutil.By("Check if the tuned hc-nodepool-invalid is created in hosted cluster nodepool")
tunedNameList, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("tuned", "-n", ntoNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNameList).NotTo(o.BeEmpty())
e2e.Logf("The list of tuned tunedNameList is: \n%v", tunedNameList)
o.Expect(tunedNameList).To(o.ContainSubstring("hc-nodepool-invalid"))
exutil.By("Get the tuned pod name that running on labeled node with hc-nodepool-invalid=")
tunedPodName, err := exutil.GetPodNameInHostedCluster(oc, ntoNamespace, "", workerNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedPodName).NotTo(o.BeEmpty())
e2e.Logf("Tuned Pod: %v", tunedPodName)
exutil.By("Label the worker nodes with hc-nodepool-invalid=")
defer oc.AsAdmin().AsGuestKubeconf().Run("label").Args("node", workerNodeName, "hc-nodepool-invalid-").Execute()
err = oc.AsAdmin().AsGuestKubeconf().Run("label").Args("node", workerNodeName, "hc-nodepool-invalid=").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check if the tuned profile applied to labeled worker nodes with hc-nodepool-invalid=")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeName, "hc-nodepool-invalid")
exutil.By("Assert recommended profile (hc-nodepool-invalid) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodName, "12", 300, `recommended profile \(hc-nodepool-invalid\) matches current configuration|static tuning from profile 'hc-nodepool-invalid' applied`)
exutil.By("Assert Failed to read sysctl parameter in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodName, "20", 300, `failed to read the original value|sysctl option kernel.pid_maxinvalid will not be set`)
expectedDegradedStatus, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io", workerNodeName, `-ojsonpath='{.status.conditions[?(@.type=="Degraded")].status}'`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(expectedDegradedStatus).NotTo(o.BeEmpty())
o.Expect(expectedDegradedStatus).To(o.ContainSubstring("True"))
exutil.By("Check if the setting of sysctl kernel.pid_max applied to labeled worker nodes, expected value is 868686")
compareSpecifiedValueByNameOnLabelNodeWithRetryInHostedCluster(oc, ntoNamespace, workerNodeName, "sysctl", "kernel.pid_max", "868686")
exutil.By("Check if the setting of sysctl vm.dirty_ratio applied to labeled worker nodes, expected value is 56")
compareSpecifiedValueByNameOnLabelNodeWithRetryInHostedCluster(oc, ntoNamespace, workerNodeName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Remove the custom tuned profile from node pool in hosted cluster ...")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", nodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[{\"name\": \"\"}]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
//Remove custom tuned profile to check if kernel.pid_max and vm.dirty_ratio rollback to origin value
exutil.By("Remove configmap from management cluster")
oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-invalid", "-n", hostedClusterNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+nodePoolName, "-n", guestClusterNS, "--ignore-not-found").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Assert recommended profile (openshift-node) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodName, "12", 300, `recommended profile \(openshift-node\) matches current configuration|static tuning from profile 'openshift-node' applied`)
exutil.By("Check if the custom tuned profile removed from labeled worker nodes, default openshift-node applied to worker node")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeName, "openshift-node")
pidMaxValue := getTunedSystemSetValueByParamNameInHostedCluster(oc, ntoNamespace, workerNodeName, "sysctl", "kernel.pid_max")
o.Expect(pidMaxValue).NotTo(o.BeEmpty())
o.Expect(pidMaxValue).NotTo(o.ContainSubstring("868686"))
vmDirtyRatioValue := getTunedSystemSetValueByParamNameInHostedCluster(oc, ntoNamespace, workerNodeName, "sysctl", "vm.dirty_ratio")
o.Expect(vmDirtyRatioValue).NotTo(o.BeEmpty())
o.Expect(vmDirtyRatioValue).NotTo(o.ContainSubstring("56"))
})
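// The Degraded condition check above can be reproduced manually against the hosted cluster, e.g.:
//   oc --kubeconfig <guestKubeconf> get -n openshift-cluster-node-tuning-operator profiles.tuned.openshift.io <workerNodeName> -o jsonpath='{.status.conditions[?(@.type=="Degraded")].status}'
// A "True" status is expected while the profile carries the invalid sysctl, while the valid settings
// (kernel.pid_max, vm.dirty_ratio) are still applied to the node.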
g.It("HyperShiftMGMT-Author:liqcui-Medium-53877-NTO support tuning sysctl that applied to all nodes of nodepool-level settings in hypershift. [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
supportPlatforms := []string{"aws", "azure", "aks"}
if !implStringArrayContains(supportPlatforms, iaasPlatform) {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
//Delete configmap in clusters namespace
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-vmdratio", "-n", hostedClusterNS, "--ignore-not-found").Execute()
//Create configmap, it will create custom tuned profile based on this configmap
exutil.By("Create configmap hc-nodepool-vmdratio in management cluster")
exutil.ApplyOperatorResourceByYaml(oc, hostedClusterNS, tunedWithNodeLevelProfileNameAKSVMRatio)
configmapsInMgmtClusters, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", hostedClusterNS).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(configmapsInMgmtClusters).NotTo(o.BeEmpty())
o.Expect(configmapsInMgmtClusters).To(o.ContainSubstring("hc-nodepool-vmdratio"))
//Apply tuned profile to hosted clusters
exutil.By("Apply tunedCconfig hc-nodepool-vmdratio in hosted cluster nodepool")
nodePoolName := getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(nodePoolName).NotTo(o.BeEmpty())
workerNodeName, err := exutil.GetFirstWorkerNodeByNodePoolNameInHostedCluster(oc, nodePoolName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(workerNodeName).NotTo(o.BeEmpty())
//Delete configmap in hosted cluster namespace and disable tuningConfig
defer assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeName, "openshift-node")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+nodePoolName, "-n", guestClusterNS, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", nodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
//Enable tuned in hosted clusters
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", nodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[{\"name\": \"hc-nodepool-vmdratio\"}]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check if the configmap hc-nodepool-vmdratio created in hosted cluster nodepool")
configMaps := getTuningConfigMapNameWithRetry(oc, guestClusterNS, nodePoolName)
o.Expect(configMaps).To(o.ContainSubstring("tuned-" + nodePoolName))
exutil.By("Check if the tuned hc-nodepool-vmdratio is created in hosted cluster nodepool")
tunedNameList, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("tuned", "-n", ntoNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNameList).NotTo(o.BeEmpty())
e2e.Logf("The list of tuned tunedNameList is: \n%v", tunedNameList)
o.Expect(tunedNameList).To(o.ContainSubstring("hc-nodepool-vmdratio"))
exutil.By("Get the tuned pod name that running on first node of nodepool")
tunedPodName, err := exutil.GetPodNameInHostedCluster(oc, ntoNamespace, "", workerNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedPodName).NotTo(o.BeEmpty())
e2e.Logf("Tuned Pod: %v", tunedPodName)
exutil.By("Check if the tuned profile applied to all worker node in specifed nodepool.")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, nodePoolName, "hc-nodepool-vmdratio")
exutil.By("Assert recommended profile (hc-nodepool-vmdratio) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodName, "12", 300, `recommended profile \(hc-nodepool-vmdratio\) matches current configuration|static tuning from profile 'hc-nodepool-vmdratio' applied`)
exutil.By("Check if the setting of sysctl vm.dirty_ratio applied to labeled worker nodes, expected value is 56")
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, nodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Remove the custom tuned profile from node pool in hosted cluster ...")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", nodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
//Remove custom tuned profile to check if kernel.pid_max and vm.dirty_ratio rollback to origin value
exutil.By("Remove configmap from management cluster")
oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-vmdratio", "-n", hostedClusterNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+nodePoolName, "-n", guestClusterNS, "--ignore-not-found").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Assert recommended profile (openshift-node) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodName, "12", 300, `recommended profile \(openshift-node\) matches current configuration|static tuning from profile 'openshift-node' applied`)
exutil.By("Check if the custom tuned profile removed from worker nodes of nodepool, default openshift-node applied to worker node")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, nodePoolName, "openshift-node")
exutil.By("The value of vm.dirty_ratio on specified nodepool should not equal to 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, nodePoolName, "sysctl", "vm.dirty_ratio", "56")
})
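// Unlike the label-scoped cases above, the hc-nodepool-vmdratio profile in this case carries no node
// label match, so it applies to every node of the nodepool. A quick manual spot check on any node of
// the pool could be, for example:
//   oc --kubeconfig <guestKubeconf> debug node/<node> -- chroot /host sysctl vm.dirty_ratio
// with 56 expected while the tuningConfig is set, and the default value after it is removed.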
g.It("HyperShiftMGMT-Author:liqcui-Medium-53886-NTO support tuning sysctl with different name that applied to one labeled node of nodepool in hypershift. [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
supportPlatforms := []string{"aws", "azure", "aks"}
if !implStringArrayContains(supportPlatforms, iaasPlatform) {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
//Delete configmap in clusters namespace
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-pidmax-cm", "-n", hostedClusterNS, "--ignore-not-found").Execute()
//Create configmap, it will create custom tuned profile based on this configmap
exutil.By("Create configmap hc-nodepool-pidmax in management cluster")
exutil.ApplyOperatorResourceByYaml(oc, hostedClusterNS, tunedWithDiffProfileNameAKSPidmax)
configmapsInMgmtClusters, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", hostedClusterNS).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(configmapsInMgmtClusters).NotTo(o.BeEmpty())
o.Expect(configmapsInMgmtClusters).To(o.ContainSubstring("hc-nodepool-pidmax-cm"))
//Apply tuned profile to hosted clusters
exutil.By("Apply tunedCconfig hc-nodepool-pidmax in hosted cluster nodepool")
nodePoolName := getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(nodePoolName).NotTo(o.BeEmpty())
exutil.By("Pick one worker node in hosted cluster, this worker node will be labeled with hc-nodepool-pidmax=")
workerNodeName, err := exutil.GetFirstLinuxWorkerNodeInHostedCluster(oc)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(workerNodeName).NotTo(o.BeEmpty())
e2e.Logf("Worker Node: %v", workerNodeName)
//Delete configmap in hosted cluster namespace and disable tuningConfig
defer assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeName, "openshift-node")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+nodePoolName, "-n", guestClusterNS, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", nodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
//Enable tuned in hosted clusters
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", nodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[{\"name\": \"hc-nodepool-pidmax-cm\"}]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check if the configmap hc-nodepool-pidmax created in hosted cluster nodepool")
configMaps := getTuningConfigMapNameWithRetry(oc, guestClusterNS, nodePoolName)
o.Expect(configMaps).To(o.ContainSubstring("tuned-" + nodePoolName))
exutil.By("Check if the tuned hc-nodepool-pidmax is created in hosted cluster nodepool")
tunedNameList, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("tuned", "-n", ntoNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNameList).NotTo(o.BeEmpty())
e2e.Logf("The list of tuned tunedNameList is: \n%v", tunedNameList)
o.Expect(tunedNameList).To(o.ContainSubstring("hc-nodepool-pidmax-tuned"))
exutil.By("Get the tuned pod name that running on labeled node with hc-nodepool-pidmax=")
tunedPodName, err := exutil.GetPodNameInHostedCluster(oc, ntoNamespace, "", workerNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedPodName).NotTo(o.BeEmpty())
e2e.Logf("Tuned Pod: %v", tunedPodName)
exutil.By("Label the worker nodes with hc-nodepool-pidmax=")
defer oc.AsAdmin().AsGuestKubeconf().Run("label").Args("node", workerNodeName, "hc-nodepool-pidmax-").Execute()
err = oc.AsAdmin().AsGuestKubeconf().Run("label").Args("node", workerNodeName, "hc-nodepool-pidmax=").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check if the tuned profile applied to labeled worker nodes with hc-nodepool-pidmax=")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeName, "hc-nodepool-pidmax-profile")
exutil.By("Assert recommended profile (hc-nodepool-pidmax-profile) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodName, "12", 300, `recommended profile \(hc-nodepool-pidmax-profile\) matches current configuration|static tuning from profile 'hc-nodepool-pidmax-profile' applied`)
exutil.By("Check if the setting of sysctl kernel.pid_max applied to labeled worker nodes, expected value is 868686")
compareSpecifiedValueByNameOnLabelNodeWithRetryInHostedCluster(oc, ntoNamespace, workerNodeName, "sysctl", "kernel.pid_max", "868686")
exutil.By("Remove the custom tuned profile from node pool in hosted cluster ...")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", nodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
//Remove custom tuned profile to check if kernel.pid_max rollback to origin value
exutil.By("Remove configmap from management cluster")
oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-pidmax-cm", "-n", hostedClusterNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+nodePoolName, "-n", guestClusterNS, "--ignore-not-found").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Assert recommended profile (openshift-node) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodName, "12", 300, `recommended profile \(openshift-node\) matches current configuration|static tuning from profile 'openshift-node' applied`)
exutil.By("Check if the custom tuned profile removed from labeled worker nodes, default openshift-node applied to worker node")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeName, "openshift-node")
pidMaxValue := getTunedSystemSetValueByParamNameInHostedCluster(oc, ntoNamespace, workerNodeName, "sysctl", "kernel.pid_max")
o.Expect(pidMaxValue).NotTo(o.BeEmpty())
o.Expect(pidMaxValue).NotTo(o.ContainSubstring("868686"))
})
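// Note on naming in the case above: the ConfigMap (hc-nodepool-pidmax-cm), the generated tuned object
// (hc-nodepool-pidmax-tuned) and the recommended profile (hc-nodepool-pidmax-profile) intentionally
// carry different names, which is the point of the "different name" scenario.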
g.It("Longduration-NonPreRelease-HyperShiftMGMT-Author:liqcui-Medium-54522-NTO Applying tuning which requires kernel boot parameters. [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
supportPlatforms := []string{"aws", "azure", "aks"}
if !implStringArrayContains(supportPlatforms, iaasPlatform) {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
defer func() {
oc.AsAdmin().WithoutNamespace().Run("delete").Args("nodepool", "hugepages-nodepool", "-n", hostedClusterNS, "--ignore-not-found").Execute()
isMatch := exutil.CheckAllNodepoolReadyByHostedClusterName(oc, "hugepages-nodepool", hostedClusterNS, 300)
o.Expect(isMatch).To(o.Equal(true))
}()
exutil.By("Create custom node pool in hosted cluster")
if iaasPlatform == "aws" {
exutil.CreateCustomNodePoolInHypershift(oc, "aws", guestClusterName, "hugepages-nodepool", "1", "m5.xlarge", "InPlace", hostedClusterNS, "")
} else if iaasPlatform == "azure" {
//Apply tuned profile to hosted clusters
exutil.By("Ge the default nodepool in hosted cluster as secondary nodepool")
defaultNodePoolName := getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(defaultNodePoolName).NotTo(o.BeEmpty())
exutil.CreateCustomNodePoolInHypershift(oc, "azure", guestClusterName, "hugepages-nodepool", "1", "Standard_D4s_v4", "InPlace", hostedClusterNS, defaultNodePoolName)
} else if iaasPlatform == "aks" {
//Apply tuned profile to hosted clusters
exutil.By("Ge the default nodepool in hosted cluster as secondary nodepool")
defaultNodePoolName := getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(defaultNodePoolName).NotTo(o.BeEmpty())
exutil.CreateCustomNodePoolInHypershift(oc, "aks", guestClusterName, "hugepages-nodepool", "1", "Standard_D4s_v4", "InPlace", hostedClusterNS, defaultNodePoolName)
}
exutil.By("Check if custom node pool is ready in hosted cluster")
exutil.AssertIfNodePoolIsReadyByName(oc, "hugepages-nodepool", 900, hostedClusterNS)
//Delete configmap in clusters namespace
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hugepages", "-n", hostedClusterNS, "--ignore-not-found").Execute()
//Create configmap, it will create custom tuned profile based on this configmap
exutil.By("Create configmap tuned-hugepages in management cluster")
exutil.ApplyOperatorResourceByYaml(oc, hostedClusterNS, tunedWithKernelBootProfileName)
configmapsInMgmtClusters, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", hostedClusterNS).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(configmapsInMgmtClusters).NotTo(o.BeEmpty())
o.Expect(configmapsInMgmtClusters).To(o.ContainSubstring("hugepages"))
exutil.By("Pick one worker node in custom node pool of hosted cluster")
workerNodeName, err := exutil.GetFirstWorkerNodeByNodePoolNameInHostedCluster(oc, "hugepages-nodepool")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(workerNodeName).NotTo(o.BeEmpty())
e2e.Logf("Worker Node: %v", workerNodeName)
//Delete configmap in hosted cluster namespace and disable tuningConfig
defer assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeName, "openshift-node")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-hugepages-nodepool", "-n", guestClusterNS, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", "hugepages-nodepool", "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
//Enable tuned in hosted clusters
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", "hugepages-nodepool", "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[{\"name\": \"tuned-hugepages\"}]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check if the configmap tuned-hugepages-nodepool created in corresponding hosted ns in management cluster")
configMaps := getTuningConfigMapNameWithRetry(oc, guestClusterNS, "hugepages-nodepool")
o.Expect(configMaps).To(o.ContainSubstring("hugepages-nodepool"))
exutil.By("Check if the configmap applied to tuned-hugepages-nodepool in management cluster")
exutil.AssertIfNodePoolUpdatingConfigByName(oc, "hugepages-nodepool", 360, hostedClusterNS)
exutil.By("Check if the tuned hugepages-xxxxxx is created in hosted cluster nodepool")
tunedNameList, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("tuned", "-n", ntoNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNameList).NotTo(o.BeEmpty())
e2e.Logf("The list of tuned tunedNameList is: \n%v", tunedNameList)
o.Expect(tunedNameList).To(o.ContainSubstring("hugepages"))
exutil.By("Get the tuned pod name that running on custom node pool worker node")
tunedPodName, err := exutil.GetPodNameInHostedCluster(oc, ntoNamespace, "", workerNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedPodName).NotTo(o.BeEmpty())
e2e.Logf("Tuned Pod: %v", tunedPodName)
exutil.By("Check if the tuned profile applied to custom node pool worker nodes")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeName, "openshift-node-hugepages")
exutil.By("Assert hugepagesz match in /proc/cmdline on the worker node in custom node pool")
assertIfMatchKenelBootOnNodePoolLevelInHostedCluster(oc, ntoNamespace, "hugepages-nodepool", "hugepagesz", true)
exutil.By("Remove the custom tuned profile from node pool in hosted cluster ...")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", "hugepages-nodepool", "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Remove configmap from management cluster")
oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-hugepages", "-n", hostedClusterNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-hugepages", "-n", guestClusterNS, "--ignore-not-found").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check if the worker node is ready after reboot due to removing kernel boot settings")
AssertIfNodeIsReadyByNodeNameInHostedCluster(oc, workerNodeName, 360)
exutil.By("Check if the removed configmap applied to tuned-hugepages-nodepool in management cluster")
exutil.AssertIfNodePoolUpdatingConfigByName(oc, "hugepages-nodepool", 360, hostedClusterNS)
exutil.By("Check if the custom tuned profile removed from labeled worker nodes, default openshift-node applied to worker node")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeName, "openshift-node")
exutil.By("Assert hugepagesz match in /proc/cmdline on the worker node in custom node pool")
assertIfMatchKenelBootOnNodePoolLevelInHostedCluster(oc, ntoNamespace, "hugepages-nodepool", "hugepagesz", false)
})
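// Kernel boot parameters differ from plain sysctl tuning: applying or removing them rolls the nodes
// of the nodepool (hence the InPlace upgrade type, the UpdatingConfig waits and the node-ready
// re-check above), and the result is asserted via hugepagesz in /proc/cmdline rather than through a
// runtime sysctl value.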
g.It("Longduration-NonPreRelease-HyperShiftMGMT-Author:liqcui-Medium-56609-NTO Scale out node pool which applied tuning with required kernel boot. [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
supportPlatforms := []string{"aws", "azure", "aks"}
if !implStringArrayContains(supportPlatforms, iaasPlatform) {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
defer func() {
oc.AsAdmin().WithoutNamespace().Run("delete").Args("nodepool", "hugepages-nodepool", "-n", hostedClusterNS, "--ignore-not-found").Execute()
isMatch := exutil.CheckAllNodepoolReadyByHostedClusterName(oc, "hugepages-nodepool", hostedClusterNS, 300)
o.Expect(isMatch).To(o.Equal(true))
}()
exutil.By("Create custom node pool in hosted cluster")
if iaasPlatform == "aws" {
exutil.CreateCustomNodePoolInHypershift(oc, "aws", guestClusterName, "hugepages-nodepool", "1", "m5.xlarge", "InPlace", hostedClusterNS, "")
} else if iaasPlatform == "azure" {
//Apply tuned profile to hosted clusters
exutil.By("Ge the default nodepool in hosted cluster as secondary nodepool")
defaultNodePoolName := getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(defaultNodePoolName).NotTo(o.BeEmpty())
exutil.CreateCustomNodePoolInHypershift(oc, "azure", guestClusterName, "hugepages-nodepool", "1", "Standard_D4s_v4", "InPlace", hostedClusterNS, defaultNodePoolName)
} else if iaasPlatform == "aks" {
//Apply tuned profile to hosted clusters
exutil.By("Ge the default nodepool in hosted cluster as secondary nodepool")
defaultNodePoolName := getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(defaultNodePoolName).NotTo(o.BeEmpty())
exutil.CreateCustomNodePoolInHypershift(oc, "aks", guestClusterName, "hugepages-nodepool", "1", "Standard_D4s_v4", "InPlace", hostedClusterNS, defaultNodePoolName)
}
exutil.By("Check if custom node pool is ready in hosted cluster")
exutil.AssertIfNodePoolIsReadyByName(oc, "hugepages-nodepool", 720, hostedClusterNS)
//Delete configmap in clusters namespace
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hugepages", "-n", hostedClusterNS, "--ignore-not-found").Execute()
//Create configmap, it will create custom tuned profile based on this configmap
exutil.By("Create configmap tuned-hugepages in management cluster")
exutil.ApplyOperatorResourceByYaml(oc, hostedClusterNS, tunedWithKernelBootProfileName)
configmapsInMgmtClusters, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", hostedClusterNS).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(configmapsInMgmtClusters).NotTo(o.BeEmpty())
o.Expect(configmapsInMgmtClusters).To(o.ContainSubstring("hugepages"))
exutil.By("Pick one worker node in custom node pool of hosted cluster")
workerNodeName, err := exutil.GetFirstWorkerNodeByNodePoolNameInHostedCluster(oc, "hugepages-nodepool")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(workerNodeName).NotTo(o.BeEmpty())
e2e.Logf("Worker Node: %v", workerNodeName)
//Delete configmap in hosted cluster namespace and disable tuningConfig
defer assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeName, "openshift-node")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-hugepages-nodepool", "-n", guestClusterNS, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", "hugepages-nodepool", "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
//Enable tuned in hosted clusters
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", "hugepages-nodepool", "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[{\"name\": \"tuned-hugepages\"}]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check if the configmap tuned-hugepages-nodepool created in corresponding hosted ns in management cluster")
configMaps := getTuningConfigMapNameWithRetry(oc, guestClusterNS, "hugepages-nodepool")
o.Expect(configMaps).To(o.ContainSubstring("hugepages-nodepool"))
exutil.By("Check if the configmap applied to tuned-hugepages-nodepool in management cluster")
exutil.AssertIfNodePoolUpdatingConfigByName(oc, "hugepages-nodepool", 360, hostedClusterNS)
exutil.By("Check if the tuned hugepages-xxxxxx is created in hosted cluster nodepool")
tunedNameList, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("tuned", "-n", ntoNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNameList).NotTo(o.BeEmpty())
e2e.Logf("The list of tuned tunedNameList is: \n%v", tunedNameList)
o.Expect(tunedNameList).To(o.ContainSubstring("hugepages"))
exutil.By("Get the tuned pod name that running on custom node pool worker node")
tunedPodName, err := exutil.GetPodNameInHostedCluster(oc, ntoNamespace, "", workerNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedPodName).NotTo(o.BeEmpty())
e2e.Logf("Tuned Pod: %v", tunedPodName)
exutil.By("Check if the tuned profile applied to custom node pool worker nodes")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeName, "openshift-node-hugepages")
exutil.By("Assert hugepagesz match in /proc/cmdline on the worker node in custom node pool")
assertIfMatchKenelBootOnNodePoolLevelInHostedCluster(oc, ntoNamespace, "hugepages-nodepool", "hugepagesz", true)
exutil.By("Scale out a new worker node in custom nodepool hugepages-nodepool")
err = oc.AsAdmin().WithoutNamespace().Run("scale").Args("nodepool", "hugepages-nodepool", "-n", hostedClusterNS, "--replicas=2").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check if updating config applied to custom node pool in hosted cluster")
exutil.AssertIfNodePoolUpdatingConfigByName(oc, "hugepages-nodepool", 720, hostedClusterNS)
exutil.By("Check if the custom tuned profile openshift-node-hugepages applied to all nodes of custom nodepool.")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, "hugepages-nodepool", "openshift-node-hugepages")
exutil.By("Assert hugepagesz match in /proc/cmdline on all nodes include the second new worker node in custom node pool")
assertIfMatchKenelBootOnNodePoolLevelInHostedCluster(oc, ntoNamespace, "hugepages-nodepool", "hugepagesz", true)
})
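// The scale-out step above is plain nodepool scaling; the point of the case is that the new node
// picks up openshift-node-hugepages and its kernel boot settings without any extra tuning action:
//   oc scale nodepool hugepages-nodepool -n <hostedClusterNS> --replicas=2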
g.It("Longduration-NonPreRelease-HyperShiftMGMT-Author:liqcui-Medium-55360-NTO does not generate MachineConfigs with bootcmdline from manual change to Profile status.bootcmdline. [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
supportPlatforms := []string{"aws", "azure", "aks"}
if !implStringArrayContains(supportPlatforms, iaasPlatform) {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
defer func() {
oc.AsAdmin().WithoutNamespace().Run("delete").Args("nodepool", "hugepages-nodepool", "-n", hostedClusterNS, "--ignore-not-found").Execute()
isMatch := exutil.CheckAllNodepoolReadyByHostedClusterName(oc, "hugepages-nodepool", hostedClusterNS, 300)
o.Expect(isMatch).To(o.Equal(true))
}()
exutil.By("Create custom node pool in hosted cluster")
if iaasPlatform == "aws" {
exutil.CreateCustomNodePoolInHypershift(oc, "aws", guestClusterName, "hugepages-nodepool", "1", "m5.xlarge", "InPlace", hostedClusterNS, "")
} else if iaasPlatform == "azure" {
//Apply tuned profile to hosted clusters
exutil.By("Ge the default nodepool in hosted cluster as secondary nodepool")
defaultNodePoolName := getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(defaultNodePoolName).NotTo(o.BeEmpty())
exutil.CreateCustomNodePoolInHypershift(oc, "azure", guestClusterName, "hugepages-nodepool", "1", "Standard_D4s_v4", "InPlace", hostedClusterNS, defaultNodePoolName)
} else if iaasPlatform == "aks" {
//Apply tuned profile to hosted clusters
exutil.By("Ge the default nodepool in hosted cluster as secondary nodepool")
defaultNodePoolName := getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(defaultNodePoolName).NotTo(o.BeEmpty())
exutil.CreateCustomNodePoolInHypershift(oc, "aks", guestClusterName, "hugepages-nodepool", "1", "Standard_D4s_v4", "InPlace", hostedClusterNS, defaultNodePoolName)
}
exutil.By("Check if custom node pool is ready in hosted cluster")
exutil.AssertIfNodePoolIsReadyByName(oc, "hugepages-nodepool", 720, hostedClusterNS)
//Delete configmap in clusters namespace
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hugepages", "-n", hostedClusterNS, "--ignore-not-found").Execute()
//Create configmap, it will create custom tuned profile based on this configmap
exutil.By("Create configmap tuned-hugepages in management cluster")
exutil.ApplyOperatorResourceByYaml(oc, hostedClusterNS, tunedWithKernelBootProfileName)
configmapsInMgmtClusters, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", hostedClusterNS).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(configmapsInMgmtClusters).NotTo(o.BeEmpty())
o.Expect(configmapsInMgmtClusters).To(o.ContainSubstring("hugepages"))
exutil.By("Pick one worker node in custom node pool of hosted cluster")
workerNodeName, err := exutil.GetFirstWorkerNodeByNodePoolNameInHostedCluster(oc, "hugepages-nodepool")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(workerNodeName).NotTo(o.BeEmpty())
e2e.Logf("Worker Node: %v", workerNodeName)
exutil.By("Get operator pod name in hosted cluster controlplane namespaceh")
ntoOperatorPodName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", guestClusterNS, "-lname=cluster-node-tuning-operator", "-ojsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(ntoOperatorPodName).NotTo(o.BeEmpty())
//Delete configmap in hosted cluster namespace and disable tuningConfig
defer assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeName, "openshift-node")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-hugepages-nodepool", "-n", guestClusterNS, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", "hugepages-nodepool", "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
//Enable tuned in hosted clusters
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", "hugepages-nodepool", "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[{\"name\": \"tuned-hugepages\"}]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check if the configmap tuned-hugepages-nodepool created in corresponding hosted ns in management cluster")
configMaps := getTuningConfigMapNameWithRetry(oc, guestClusterNS, "hugepages-nodepool")
o.Expect(configMaps).NotTo(o.BeEmpty())
o.Expect(configMaps).To(o.ContainSubstring("hugepages-nodepool"))
exutil.By("Check if the configmap applied to tuned-hugepages-nodepool in management cluster")
exutil.AssertIfNodePoolUpdatingConfigByName(oc, "hugepages-nodepool", 360, hostedClusterNS)
exutil.By("Check if the tuned hugepages-xxxxxx is created in hosted cluster nodepool")
tunedNameList, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("tuned", "-n", ntoNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNameList).NotTo(o.BeEmpty())
e2e.Logf("The list of tuned tunedNameList is: \n%v", tunedNameList)
o.Expect(tunedNameList).To(o.ContainSubstring("hugepages"))
exutil.By("Get the tuned pod name that running on custom node pool worker node")
tunedPodName, err := exutil.GetPodNameInHostedCluster(oc, ntoNamespace, "", workerNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedPodName).NotTo(o.BeEmpty())
e2e.Logf("Tuned Pod: %v", tunedPodName)
exutil.By("Check if the tuned profile applied to custom node pool worker nodes")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeName, "openshift-node-hugepages")
exutil.By("Assert hugepagesz match in /proc/cmdline on the worker node in custom node pool")
assertIfMatchKenelBootOnNodePoolLevelInHostedCluster(oc, ntoNamespace, "hugepages-nodepool", "hugepagesz", true)
exutil.By("Manually change the hugepage value in the worker node of custom nodepool hugepages-nodepool in hosted clusters")
err = oc.AsAdmin().AsGuestKubeconf().Run("patch").Args("-n", ntoNamespace, "profile/"+workerNodeName, "--type", "merge", "-p", `{"status":{"bootcmdline": "hugepagesz=2M hugepages=10"}}`).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check if the value of profile change in the worker node of custom nodepool hugepages-nodepool in hosted clusters, the expected value is still hugepagesz=2M hugepages=50")
bootCMDLinestdOut, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("-n", ntoNamespace, "profile/"+workerNodeName, "-ojsonpath='{.status.bootcmdline}'").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("status.bootcmdline is: %v", bootCMDLinestdOut)
o.Expect(bootCMDLinestdOut).NotTo(o.ContainSubstring("hugepagesz=2M hugepages=50"))
//The field of bootcmdline has been deprecated
exutil.By("Check if custom node pool is ready in hosted cluster")
exutil.AssertIfNodePoolIsReadyByName(oc, "hugepages-nodepool", 360, hostedClusterNS)
exutil.By("Check if the custom tuned profile openshift-node-hugepages applied to all nodes of custom nodepool.")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, "hugepages-nodepool", "openshift-node-hugepages")
exutil.By("Assert hugepagesz match in /proc/cmdline on all nodes include the second new worker node in custom node pool")
assertIfMatchKenelBootOnNodePoolLevelInHostedCluster(oc, ntoNamespace, "hugepages-nodepool", "hugepagesz=2M hugepages=50", true)
})
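// As the case above exercises, Profile status.bootcmdline is deprecated: a manual edit of that status
// field is not reconciled into new MachineConfigs, so the nodes keep the kernel arguments rendered
// from the tuned profile (hugepagesz=2M hugepages=50) and the hand-written value has no effect.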
g.It("Longduration-NonPreRelease-HyperShiftMGMT-Author:liqcui-Medium-55359-NTO applies one configmap that is referenced in two nodepools in the same hosted cluster. [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
supportPlatforms := []string{"aws", "azure", "aks"}
if !implStringArrayContains(supportPlatforms, iaasPlatform) {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
firstNodePoolName = "hc-custom-nodepool"
//Delete configmap in clusters namespace
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-vmdratio", "-n", hostedClusterNS, "--ignore-not-found").Execute()
//Create configmap, it will create custom tuned profile based on this configmap
exutil.By("Create configmap hc-nodepool-vmdratio in management cluster")
exutil.ApplyOperatorResourceByYaml(oc, hostedClusterNS, tunedWithNodeLevelProfileNameAKSVMRatio)
configmapsInMgmtClusters, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", hostedClusterNS).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(configmapsInMgmtClusters).NotTo(o.BeEmpty())
o.Expect(configmapsInMgmtClusters).To(o.ContainSubstring("hc-nodepool-vmdratio"))
defer func() {
oc.AsAdmin().WithoutNamespace().Run("delete").Args("nodepool", firstNodePoolName, "-n", hostedClusterNS, "--ignore-not-found").Execute()
isMatch := exutil.CheckAllNodepoolReadyByHostedClusterName(oc, firstNodePoolName, hostedClusterNS, 300)
o.Expect(isMatch).To(o.Equal(true))
}()
exutil.By("Create custom node pool in hosted cluster")
if iaasPlatform == "aws" {
exutil.CreateCustomNodePoolInHypershift(oc, "aws", guestClusterName, firstNodePoolName, "1", "m5.xlarge", "InPlace", hostedClusterNS, "")
} else if iaasPlatform == "azure" {
//Apply tuned profile to hosted clusters
exutil.By("Ge the default nodepool in hosted cluster as secondary nodepool")
defaultNodePoolName := getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(defaultNodePoolName).NotTo(o.BeEmpty())
exutil.CreateCustomNodePoolInHypershift(oc, "azure", guestClusterName, firstNodePoolName, "1", "Standard_D4s_v4", "InPlace", hostedClusterNS, defaultNodePoolName)
} else if iaasPlatform == "aks" {
//Apply tuned profile to hosted clusters
exutil.By("Ge the default nodepool in hosted cluster as secondary nodepool")
defaultNodePoolName := getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(defaultNodePoolName).NotTo(o.BeEmpty())
exutil.CreateCustomNodePoolInHypershift(oc, "aks", guestClusterName, firstNodePoolName, "1", "Standard_D4s_v4", "InPlace", hostedClusterNS, defaultNodePoolName)
}
exutil.By("Check if custom node pool is ready in hosted cluster")
exutil.AssertIfNodePoolIsReadyByName(oc, firstNodePoolName, 720, hostedClusterNS)
//Apply tuned profile to hosted clusters
exutil.By("Ge the default nodepool in hosted cluster as secondary nodepool")
secondNodePoolName = getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(secondNodePoolName).NotTo(o.BeEmpty())
exutil.By("Pick one worker node in first custom node pool of hosted cluster")
workerNodeNameInFirstNodepool, err := exutil.GetFirstWorkerNodeByNodePoolNameInHostedCluster(oc, firstNodePoolName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(workerNodeNameInFirstNodepool).NotTo(o.BeEmpty())
e2e.Logf("Worker node in first nodepool: %v", workerNodeNameInFirstNodepool)
exutil.By("Pick one worker node in second node pool of hosted cluster")
workerNodeNameInSecondtNodepool, err := exutil.GetFirstWorkerNodeByNodePoolNameInHostedCluster(oc, secondNodePoolName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(workerNodeNameInSecondtNodepool).NotTo(o.BeEmpty())
e2e.Logf("Worker node in second nodepool: %v", workerNodeNameInSecondtNodepool)
//Delete configmap in hosted cluster namespace and disable tuningConfig
defer assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeNameInFirstNodepool, "openshift-node")
defer assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeNameInSecondtNodepool, "openshift-node")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-hc-nodepool-vmdratio", "-n", guestClusterNS, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+secondNodePoolName, "-n", guestClusterNS, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", firstNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", secondNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
//Apply the tuned profile in first nodepool {firstNodePoolName}
exutil.By("Apply the tuned profile in first nodepool {firstNodePoolName} in management cluster")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", firstNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[{\"name\": \"hc-nodepool-vmdratio\"}]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check if the configmap tuned-{firstNodePoolName} created in corresponding hosted ns in management cluster")
configMaps := getTuningConfigMapNameWithRetry(oc, guestClusterNS, "tuned-"+firstNodePoolName)
o.Expect(configMaps).NotTo(o.BeEmpty())
o.Expect(configMaps).To(o.ContainSubstring("tuned-" + firstNodePoolName))
exutil.By("Check if the tuned hc-nodepool-vmdratio-xxxxxx is created in hosted cluster nodepool")
tunedNameList, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("tuned", "-n", ntoNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNameList).NotTo(o.BeEmpty())
e2e.Logf("The list of tuned tunedNameList is: \n%v", tunedNameList)
o.Expect(tunedNameList).To(o.ContainSubstring("hc-nodepool-vmdratio"))
exutil.By("Get the tuned pod name that running on first custom nodepool worker node")
tunedPodNameInFirstNodePool, err := exutil.GetPodNameInHostedCluster(oc, ntoNamespace, "", workerNodeNameInFirstNodepool)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedPodNameInFirstNodePool).NotTo(o.BeEmpty())
e2e.Logf("Tuned Pod: %v", tunedPodNameInFirstNodePool)
exutil.By("Check if the tuned profile applied to first custom nodepool worker nodes")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeNameInFirstNodepool, "hc-nodepool-vmdratio")
exutil.By("Check if the tuned profile applied to all worker node in the first nodepool.")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "hc-nodepool-vmdratio")
exutil.By("Assert recommended profile (hc-nodepool-vmdratio) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInFirstNodePool, "12", 300, `recommended profile \(hc-nodepool-vmdratio\) matches current configuration|static tuning from profile 'hc-nodepool-vmdratio' applied`)
exutil.By("Check if the setting of sysctl vm.dirty_ratio applied to worker nodes in the first custom nodepool, expected value is 56")
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, firstNodePoolName, "sysctl", "vm.dirty_ratio", "56")
//Compare the sysctl vm.dirty_ratio not equal to 56
exutil.By("Check if the setting of sysctl vm.dirty_ratio shouldn't applied to worker nodes in the second nodepool, expected value is default value, not equal 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "vm.dirty_ratio", "56")
//Apply the tuned profile in second nodepool
exutil.By("Apply the tuned profile in second nodepool in management cluster")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", secondNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[{\"name\": \"hc-nodepool-vmdratio\"}]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Get the tuned pod name that running on second nodepool worker node")
tunedPodNameInSecondNodePool, err := exutil.GetPodNameInHostedCluster(oc, ntoNamespace, "", workerNodeNameInSecondtNodepool)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedPodNameInSecondNodePool).NotTo(o.BeEmpty())
e2e.Logf("Tuned Pod: %v", tunedPodNameInSecondNodePool)
exutil.By("Check if the tuned profile applied to second nodepool worker nodes")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeNameInSecondtNodepool, "hc-nodepool-vmdratio")
exutil.By("Check if the tuned profile applied to all worker node in the first nodepool.")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "hc-nodepool-vmdratio")
exutil.By("Assert recommended profile (hc-nodepool-vmdratio) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInSecondNodePool, "12", 300, `recommended profile \(hc-nodepool-vmdratio\) matches current configuration|static tuning from profile 'hc-nodepool-vmdratio' applied`)
exutil.By("Check if the setting of sysctl vm.dirty_ratio applied to worker nodes in the second nodepool, expected value is 56")
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Remove the custom tuned profile from the first nodepool in hosted cluster ...")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", firstNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
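// Clearing spec.tuningConfig on the first nodepool should roll its worker nodes back to the
// default openshift-node profile while leaving the second nodepool's tuning untouched; both
// directions are asserted below.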
//Compare the sysctl vm.dirty_ratio not equal to 56
exutil.By("Check if the setting of sysctl vm.dirty_ratio shouldn't applied to worker nodes in the first nodepool, expected value is default value, not equal 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Check if the setting of sysctl vm.dirty_ratio applied to worker nodes in the second nodepool, no impact with removing vm.dirty_ratio setting in first nodepool")
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Remove the custom tuned profile from the second nodepool in hosted cluster ...")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", secondNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
//Compare the sysctl vm.dirty_ratio not equal to 56
exutil.By("Check if the setting of sysctl vm.dirty_ratio shouldn't applied to worker nodes in the first nodepool, expected value is default value, not equal 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "sysctl", "vm.dirty_ratio", "56")
//Compare the sysctl vm.dirty_ratio not equal to 56
exutil.By("Check if the setting of sysctl vm.dirty_ratio shouldn't applied to worker nodes in the second nodepool, expected value is default value, not equal 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "vm.dirty_ratio", "56")
//Clean up all remaining resources/settings
exutil.By("Remove configmap from management cluster")
oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-vmdratio", "-n", hostedClusterNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+firstNodePoolName, "-n", guestClusterNS, "--ignore-not-found").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+secondNodePoolName, "-n", guestClusterNS, "--ignore-not-found").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Assert recommended profile (openshift-node) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInFirstNodePool, "12", 300, `recommended profile \(openshift-node\) matches current configuration|static tuning from profile 'openshift-node' applied`)
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInSecondNodePool, "12", 300, `recommended profile \(openshift-node\) matches current configuration|static tuning from profile 'openshift-node' applied`)
exutil.By("Check if the custom tuned profile removed from worker nodes of nodepool, default openshift-node applied to worker node")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "openshift-node")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "openshift-node")
})
g.It("Longduration-NonPreRelease-HyperShiftMGMT-Author:liqcui-Medium-53885-NTO applies different configmaps that reference to into two node pool in the same hosted clusters. [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
supportPlatforms := []string{"aws", "azure", "aks"}
if !implStringArrayContains(supportPlatforms, iaasPlatform) {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
firstNodePoolName = "hc-custom-nodepool"
//Delete configmap in clusters namespace
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-vmdratio", "-n", hostedClusterNS, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-pidmax", "-n", hostedClusterNS, "--ignore-not-found").Execute()
//Create the configmaps; they will create custom tuned profiles based on their content
exutil.By("Create configmap hc-nodepool-vmdratio and hc-nodepool-pidmax in management cluster")
exutil.ApplyOperatorResourceByYaml(oc, hostedClusterNS, tunedWithNodeLevelProfileNameAKSVMRatio)
exutil.ApplyOperatorResourceByYaml(oc, hostedClusterNS, tunedWithNodeLevelProfileNameAKSPidmax)
configmapsInMgmtClusters, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", hostedClusterNS).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(configmapsInMgmtClusters).NotTo(o.BeEmpty())
o.Expect(configmapsInMgmtClusters).To(o.ContainSubstring("hc-nodepool-vmdratio"))
o.Expect(configmapsInMgmtClusters).To(o.ContainSubstring("hc-nodepool-pidmax"))
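// Judging by the assertions later in this case, hc-nodepool-vmdratio carries vm.dirty_ratio=56
// and hc-nodepool-pidmax carries kernel.pid_max=868686. Each nodepool will reference exactly one
// of the two ConfigMaps, so the settings must stay isolated per pool.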
defer func() {
oc.AsAdmin().WithoutNamespace().Run("delete").Args("nodepool", firstNodePoolName, "-n", hostedClusterNS, "--ignore-not-found").Execute()
isMatch := exutil.CheckAllNodepoolReadyByHostedClusterName(oc, firstNodePoolName, hostedClusterNS, 300)
o.Expect(isMatch).To(o.Equal(true))
}()
exutil.By("Create custom node pool in hosted cluster")
if iaasPlatform == "aws" {
exutil.CreateCustomNodePoolInHypershift(oc, "aws", guestClusterName, firstNodePoolName, "1", "m5.xlarge", "InPlace", hostedClusterNS, "")
} else if iaasPlatform == "azure" {
//Apply tuned profile to hosted clusters
exutil.By("Ge the default nodepool in hosted cluster as secondary nodepool")
defaultNodePoolName := getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(defaultNodePoolName).NotTo(o.BeEmpty())
exutil.CreateCustomNodePoolInHypershift(oc, "azure", guestClusterName, firstNodePoolName, "1", "Standard_D4s_v4", "InPlace", hostedClusterNS, defaultNodePoolName)
} else if iaasPlatform == "aks" {
//Apply tuned profile to hosted clusters
exutil.By("Ge the default nodepool in hosted cluster as secondary nodepool")
defaultNodePoolName := getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(defaultNodePoolName).NotTo(o.BeEmpty())
exutil.CreateCustomNodePoolInHypershift(oc, "aks", guestClusterName, firstNodePoolName, "1", "Standard_D4s_v4", "InPlace", hostedClusterNS, defaultNodePoolName)
}
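// CreateCustomNodePoolInHypershift is invoked with: platform, hosted cluster name, the new
// nodepool name, replica count ("1"), instance type, upgrade type ("InPlace"), the clusters
// namespace, and a trailing argument that is empty on AWS but the default nodepool name on
// Azure/AKS.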
exutil.By("Check if custom node pool is ready in hosted cluster")
exutil.AssertIfNodePoolIsReadyByName(oc, firstNodePoolName, 720, hostedClusterNS)
//Apply tuned profile to hosted clusters
exutil.By("Ge the default nodepool in hosted cluster as secondary nodepool")
secondNodePoolName = getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(secondNodePoolName).NotTo(o.BeEmpty())
exutil.By("Pick one worker node in first custom node pool of hosted cluster")
workerNodeNameInFirstNodepool, err := exutil.GetFirstWorkerNodeByNodePoolNameInHostedCluster(oc, firstNodePoolName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(workerNodeNameInFirstNodepool).NotTo(o.BeEmpty())
e2e.Logf("Worker node in first nodepool: %v", workerNodeNameInFirstNodepool)
exutil.By("Pick one worker node in second node pool of hosted cluster")
workerNodeNameInSecondtNodepool, err := exutil.GetFirstWorkerNodeByNodePoolNameInHostedCluster(oc, secondNodePoolName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(workerNodeNameInSecondtNodepool).NotTo(o.BeEmpty())
e2e.Logf("Worker node in second nodepool: %v", workerNodeNameInSecondtNodepool)
//Delete configmap in hosted cluster namespace and disable tuningConfig
defer assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeNameInFirstNodepool, "openshift-node")
defer assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeNameInSecondtNodepool, "openshift-node")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-hc-nodepool-vmdratio", "-n", guestClusterNS, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-hc-nodepool-pidmax", "-n", guestClusterNS, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", firstNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", secondNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
//Apply the tuned profile in first nodepool {firstNodePoolName}
exutil.By("Apply the tuned profile in first nodepool {firstNodePoolName} in management cluster")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", firstNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[{\"name\": \"hc-nodepool-vmdratio\"}]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
//Apply the tuned profile in second nodepool {secondNodePoolName}
exutil.By("Apply the tuned profile in second nodepool {secondNodePoolName} in management cluster")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", secondNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[{\"name\": \"hc-nodepool-pidmax\"}]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
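// Each nodepool now references a different tuning ConfigMap, so vm.dirty_ratio should change
// only in the first pool and kernel.pid_max only in the second; the match/mismatch checks below
// verify that isolation in both directions.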
exutil.By("Check if the configmap tuned-{firstNodePoolName} created in corresponding hosted ns in management cluster")
configMaps := getTuningConfigMapNameWithRetry(oc, guestClusterNS, "tuned-"+firstNodePoolName)
o.Expect(configMaps).NotTo(o.BeEmpty())
o.Expect(configMaps).To(o.ContainSubstring("tuned-" + firstNodePoolName))
exutil.By("Check if the configmap tuned-{secondNodePoolName} created in corresponding hosted ns in management cluster")
configMaps = getTuningConfigMapNameWithRetry(oc, guestClusterNS, "tuned-"+secondNodePoolName)
o.Expect(configMaps).NotTo(o.BeEmpty())
o.Expect(configMaps).To(o.ContainSubstring("tuned-" + secondNodePoolName))
exutil.By("Check if the tuned hc-nodepool-vmdratio-xxxxxx and hc-nodepool-pidmax-xxxxxx is created in hosted cluster nodepool")
tunedNameList, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("tuned", "-n", ntoNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNameList).NotTo(o.BeEmpty())
e2e.Logf("The list of tuned tunedNameList is: \n%v", tunedNameList)
o.Expect(tunedNameList).To(o.ContainSubstring("hc-nodepool-vmdratio"))
o.Expect(tunedNameList).To(o.ContainSubstring("hc-nodepool-pidmax"))
exutil.By("Get the tuned pod name that running on first custom nodepool worker node")
tunedPodNameInFirstNodePool, err := exutil.GetPodNameInHostedCluster(oc, ntoNamespace, "", workerNodeNameInFirstNodepool)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedPodNameInFirstNodePool).NotTo(o.BeEmpty())
e2e.Logf("Tuned Pod: %v", tunedPodNameInFirstNodePool)
exutil.By("Get the tuned pod name that running on second nodepool worker node")
tunedPodNameInSecondNodePool, err := exutil.GetPodNameInHostedCluster(oc, ntoNamespace, "", workerNodeNameInSecondtNodepool)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedPodNameInSecondNodePool).NotTo(o.BeEmpty())
e2e.Logf("Tuned Pod: %v", tunedPodNameInSecondNodePool)
exutil.By("Check if the tuned profile applied to first custom nodepool worker nodes")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeNameInFirstNodepool, "hc-nodepool-vmdratio")
exutil.By("Check if the tuned profile applied to second nodepool worker nodes")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeNameInSecondtNodepool, "hc-nodepool-pidmax")
exutil.By("Check if the tuned profile applied to all worker node in the first nodepool.")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "hc-nodepool-vmdratio")
exutil.By("Check if the tuned profile applied to all worker node in the second nodepool.")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "hc-nodepool-pidmax")
exutil.By("Assert recommended profile (hc-nodepool-vmdratio) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInFirstNodePool, "12", 300, `recommended profile \(hc-nodepool-vmdratio\) matches current configuration|static tuning from profile 'hc-nodepool-vmdratio' applied`)
exutil.By("Assert recommended profile (hc-nodepool-pidmax) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInSecondNodePool, "12", 300, `recommended profile \(hc-nodepool-pidmax\) matches current configuration|static tuning from profile 'hc-nodepool-pidmax' applied`)
exutil.By("Check if the setting of sysctl vm.dirty_ratio applied to worker nodes in the first custom nodepool, expected value is 56")
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, firstNodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Check if the setting of sysctl kernel.pid_max applied to worker nodes in the second custom nodepool, expected value is 868686")
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "kernel.pid_max", "868686")
//Compare the sysctl kernel.pid_max not equal to 868686 in first nodepool
exutil.By("Check if the setting of sysctl kernel.pid_max shouldn't applied to worker nodes in the first nodepool, expected value is default value, not equal 868686")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "sysctl", "kernel.pid_max", "868686")
//Compare the sysctl vm.dirty_ratio not equal to 56 in second nodepool
exutil.By("Check if the setting of sysctl vm.dirty_ratio shouldn't applied to worker nodes in the second nodepool, expected value is default value, not equal 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Remove the custom tuned profile from the first nodepool in hosted cluster ...")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", firstNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check if the tuned profile still applied to all worker node in the second nodepool.")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "hc-nodepool-pidmax")
//Compare the sysctl vm.dirty_ratio not equal to 56 in first nodepool
exutil.By("Check if the setting of sysctl vm.dirty_ratio shouldn't applied to worker nodes in the first nodepool, expected value is default value, not equal 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Check if the setting of sysctl kernel.pid_max still applied to worker nodes in the second nodepool, no impact with removing vm.dirty_ratio setting in first nodepool")
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "kernel.pid_max", "868686")
exutil.By("Remove the custom tuned profile from the second nodepool in hosted cluster ...")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", secondNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
//Compare the sysctl vm.dirty_ratio not equal to 56 in first nodepool
exutil.By("Check if the setting of sysctl vm.dirty_ratio shouldn't applied to worker nodes in the first nodepool, expected value is default value, not equal 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "sysctl", "vm.dirty_ratio", "56")
//Compare the sysctl kernel.pid_max not equal to 868686 in second nodepool
exutil.By("Check if the setting of sysctl kernel.pid_max shouldn't applied to worker nodes in the second nodepool, expected value is default value, not equal 868686")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "kernel.pid_max", "868686")
//Clean up all remaining resources/settings
exutil.By("Remove configmap from management cluster")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-vmdratio", "-n", hostedClusterNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-pidmax", "-n", hostedClusterNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+firstNodePoolName, "-n", guestClusterNS, "--ignore-not-found").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+secondNodePoolName, "-n", guestClusterNS, "--ignore-not-found").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Assert recommended profile (openshift-node) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInFirstNodePool, "12", 300, `recommended profile \(openshift-node\) matches current configuration|static tuning from profile 'openshift-node' applied`)
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInSecondNodePool, "12", 300, `recommended profile \(openshift-node\) matches current configuration|static tuning from profile 'openshift-node' applied`)
exutil.By("Check if the custom tuned profile removed from worker nodes of nodepool, default openshift-node applied to worker node")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "openshift-node")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "openshift-node")
})
g.It("HyperShiftMGMT-Author:liqcui-Medium-54546-NTO applies two Tuneds from two configmap referenced in one nodepool of a hosted cluster on hypershift.[Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
supportPlatforms := []string{"aws", "azure", "aks"}
if !implStringArrayContains(supportPlatforms, iaasPlatform) {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
//Delete configmap in clusters namespace
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-vmdratio", "-n", hostedClusterNS, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-pidmax", "-n", hostedClusterNS, "--ignore-not-found").Execute()
//Create the configmaps; they will create custom tuned profiles based on their content
exutil.By("Create configmap hc-nodepool-vmdratio and hc-nodepool-pidmax in management cluster")
exutil.ApplyOperatorResourceByYaml(oc, hostedClusterNS, tunedWithNodeLevelProfileNameAKSVMRatio)
exutil.ApplyOperatorResourceByYaml(oc, hostedClusterNS, tunedWithNodeLevelProfileNameAKSPidmax)
configmapsInMgmtClusters, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", hostedClusterNS).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(configmapsInMgmtClusters).NotTo(o.BeEmpty())
o.Expect(configmapsInMgmtClusters).To(o.ContainSubstring("hc-nodepool-vmdratio"))
o.Expect(configmapsInMgmtClusters).To(o.ContainSubstring("hc-nodepool-pidmax"))
//Apply tuned profile to hosted clusters
exutil.By("Get the default nodepool name in hosted cluster")
nodePoolName := getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(nodePoolName).NotTo(o.BeEmpty())
exutil.By("Pick one worker node in first custom node pool of hosted cluster")
workerNodeNameInNodepool, err := exutil.GetFirstWorkerNodeByNodePoolNameInHostedCluster(oc, nodePoolName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(workerNodeNameInNodepool).NotTo(o.BeEmpty())
e2e.Logf("Worker node in first nodepool: %v", workerNodeNameInNodepool)
//Delete configmap in hosted cluster namespace and disable tuningConfig
defer assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeNameInNodepool, "openshift-node")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-hc-nodepool-vmdratio", "-n", guestClusterNS, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-hc-nodepool-pidmax", "-n", guestClusterNS, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", nodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
//Apply the tuned profile in nodepool in hostedcluster
exutil.By("Apply the tuned profile in default nodepool in management cluster")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", nodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[{\"name\": \"hc-nodepool-vmdratio\"},{\"name\": \"hc-nodepool-pidmax\"}]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
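// A single nodepool now references two Tuneds at once. Which profile is recommended is decided
// by the priority inside each Tuned (the lower number wins), which the following steps exercise
// by re-applying the profiles with different priorities.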
exutil.By("Check if the configmap tuned-{nodePoolName} created in corresponding hosted ns in management cluster")
configMaps := getTuningConfigMapNameWithRetry(oc, guestClusterNS, "tuned-"+nodePoolName)
o.Expect(configMaps).NotTo(o.BeEmpty())
o.Expect(configMaps).To(o.ContainSubstring("tuned-" + nodePoolName))
exutil.By("Check if the tuned hc-nodepool-vmdratio-xxxxxx and hc-nodepool-pidmax-xxxxxx is created in hosted cluster nodepool")
tunedNameList, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("tuned", "-n", ntoNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNameList).NotTo(o.BeEmpty())
e2e.Logf("The list of tuned tunedNameList is: \n%v", tunedNameList)
o.Expect(tunedNameList).To(o.ContainSubstring("hc-nodepool-vmdratio"))
o.Expect(tunedNameList).To(o.ContainSubstring("hc-nodepool-pidmax"))
exutil.By("Get the tuned pod name that running on default nodepool worker node")
tunedPodNameInNodePool, err := exutil.GetPodNameInHostedCluster(oc, ntoNamespace, "", workerNodeNameInNodepool)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedPodNameInNodePool).NotTo(o.BeEmpty())
e2e.Logf("Tuned Pod: %v", tunedPodNameInNodePool)
exutil.By("Check if the tuned profile applied to nodepool worker nodes, the second profile hc-nodepool-pidmax take effective by default, the first one won't take effective")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeNameInNodepool, "hc-nodepool-pidmax")
exutil.By("Check if the tuned profile applied to all worker node in the second nodepool.")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, nodePoolName, "hc-nodepool-pidmax")
exutil.By("Assert recommended profile (hc-nodepool-pidmax) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInNodePool, "12", 300, `recommended profile \(hc-nodepool-pidmax\) matches current configuration|static tuning from profile 'hc-nodepool-pidmax' applied`)
exutil.By("Check if the setting of sysctl kernel.pid_max applied to worker nodes in the default nodepool, expected value is 868686")
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, nodePoolName, "sysctl", "kernel.pid_max", "868686")
//Compare the sysctl vm.dirty_ratio not equal to 56 in default nodepool
exutil.By("Check if the setting of sysctl vm.dirty_ratio shouldn't applied to worker nodes in the nodepool, expected value is default value, not equal 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, nodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Chnagge the hc-nodepool-vmdratio with a higher priority in management cluster, the lower number of priority with higher priority")
exutil.ApplyOperatorResourceByYaml(oc, hostedClusterNS, tunedWithNodeLevelProfileNameAKSVMRatio18)
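// Re-applying hc-nodepool-vmdratio with a lower priority number (18, per the fixture name)
// should make it the recommended profile on the nodepool, displacing hc-nodepool-pidmax.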
exutil.By("Check if the tuned profile hc-nodepool-vmdratio applied to all worker node in the nodepool.")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, nodePoolName, "hc-nodepool-vmdratio")
exutil.By("Assert recommended profile (hc-nodepool-vmdratio) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInNodePool, "12", 300, `recommended profile \(hc-nodepool-vmdratio\) matches current configuration|static tuning from profile 'hc-nodepool-vmdratio' applied`)
exutil.By("Check if the setting of sysctl vm.dirty_ratio applied to worker nodes in the nodepool, expected value is 56")
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, nodePoolName, "sysctl", "vm.dirty_ratio", "56")
//Compare the sysctl kernel.pid_max not equal to 868686 in first nodepool
exutil.By("Check if the setting of sysctl kernel.pid_max shouldn't applied to worker nodes in the nodepool, expected value is default value, not equal 868686")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, nodePoolName, "sysctl", "kernel.pid_max", "868686")
exutil.By("Chnagge custom profile include setting with <openshift-node,hc-nodepool-vmdratio> and set priority to 16 in management cluster, both custom profile take effective")
exutil.ApplyOperatorResourceByYaml(oc, hostedClusterNS, tunedWithNodeLevelProfileNameAKSPidmax16)
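// hc-nodepool-pidmax is re-applied with priority 16 and an include chain of
// <openshift-node,hc-nodepool-vmdratio> (see the step description above), so both custom
// sysctls are expected to be active at the same time.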
exutil.By("Check if the setting of sysctl vm.dirty_ratio applied to worker nodes in the nodepool, expected value is 56")
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, nodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Check if the setting of sysctl kernel.pid_max applied to worker nodes in the default nodepool, expected value is 868686")
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, nodePoolName, "sysctl", "kernel.pid_max", "868686")
exutil.By("Chnagge the value of kernel.pid_max of custom profile hc-nodepool-pidmax in management cluster")
exutil.ApplyOperatorResourceByYaml(oc, hostedClusterNS, tunedWithNodeLevelProfileNameAKSPidmax1688)
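// Only the ConfigMap content changes here (kernel.pid_max is bumped to 888888 per the check
// below); no nodepool re-patch is issued, so TuneD is expected to reconcile the new value on
// its own.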
exutil.By("Check if the setting of sysctl kernel.pid_max applied to worker nodes in the default nodepool, expected value is 888888")
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, nodePoolName, "sysctl", "kernel.pid_max", "888888")
exutil.By("Remove the custom tuned profile from the first nodepool in hosted cluster ...")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", nodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
//Compare the sysctl vm.dirty_ratio not equal to 56 in the nodepool
exutil.By("Check if the setting of sysctl vm.dirty_ratio shouldn't be applied to worker nodes in the nodepool, expected value is the default, not 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, nodePoolName, "sysctl", "vm.dirty_ratio", "56")
//Compare the sysctl kernel.pid_max not equal to 868686 in the nodepool
exutil.By("Check if the setting of sysctl kernel.pid_max shouldn't be applied to worker nodes in the nodepool, expected value is the default, not 868686")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, nodePoolName, "sysctl", "kernel.pid_max", "868686")
//Clean up all remaining resources/settings
exutil.By("Remove configmap from management cluster")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-vmdratio", "-n", hostedClusterNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-pidmax", "-n", hostedClusterNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+nodePoolName, "-n", guestClusterNS, "--ignore-not-found").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Assert recommended profile (openshift-node) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInNodePool, "12", 300, `recommended profile \(openshift-node\) matches current configuration|static tuning from profile 'openshift-node' applied`)
exutil.By("Check if the custom tuned profile removed from worker nodes of nodepool, default openshift-node applied to worker node")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, nodePoolName, "openshift-node")
})
g.It("NonPreRelease-Longduration-HyperShiftMGMT-Author:liqcui-Medium-53880-NTO apply one configmap that reference to two separated hosted clusters on hypershift. [Disruptive]", func() {
//Second Hosted Cluster
guestClusterName2, guestClusterKube2, hostedClusterNS2 = exutil.ValidHypershiftAndGetGuestKubeConf4SecondHostedCluster(oc)
e2e.Logf("%s, %s, %s", guestClusterName2, guestClusterKube2, hostedClusterNS2)
guestClusterNS2 = hostedClusterNS2 + "-" + guestClusterName2
e2e.Logf("HostedClusterControlPlaneNS: %v", guestClusterNS2)
// ensure NTO operator is installed
isNTO2 = isHyperNTOPodInstalled(oc, guestClusterNS2)
// test requires NTO to be installed
if !isNTO || !isNTO2 {
g.Skip("NTO is not installed - skipping test ...")
}
supportPlatforms := []string{"aws", "azure"}
if !implStringArrayContains(supportPlatforms, iaasPlatform) {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
//Delete configmap in clusters namespace
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-vmdratio", "-n", hostedClusterNS, "--ignore-not-found").Execute()
//Create the configmap; it will create a custom tuned profile based on this configmap
exutil.By("Create configmap hc-nodepool-vmdratio in management cluster")
exutil.ApplyNsResourceFromTemplate(oc, hostedClusterNS, "--ignore-unknown-parameters=true", "-f", tunedWithNodeLevelProfileName, "-p", "TUNEDPROFILENAME=hc-nodepool-vmdratio", "SYSCTLPARM=vm.dirty_ratio", "SYSCTLVALUE=56", "PRIORITY=20", "INCLUDE=openshift-node")
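// The template is instantiated with TUNEDPROFILENAME=hc-nodepool-vmdratio, SYSCTLPARM=vm.dirty_ratio,
// SYSCTLVALUE=56, PRIORITY=20 and INCLUDE=openshift-node. The rendered object is a ConfigMap
// (verified just below); for nodepool tuningConfig it is expected to wrap a Tuned manifest,
// roughly of this shape (a sketch, not the exact fixture content):
//
//   apiVersion: v1
//   kind: ConfigMap
//   metadata:
//     name: hc-nodepool-vmdratio
//   data:
//     tuning: |
//       apiVersion: tuned.openshift.io/v1
//       kind: Tuned
//       metadata:
//         name: hc-nodepool-vmdratio
//       spec:
//         profile:
//         - name: hc-nodepool-vmdratio
//           data: |
//             [main]
//             include=openshift-node
//             [sysctl]
//             vm.dirty_ratio=56
//         recommend:
//         - priority: 20
//           profile: hc-nodepool-vmdratio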
configmapsInMgmtClusters, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", hostedClusterNS).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(configmapsInMgmtClusters).NotTo(o.BeEmpty())
o.Expect(configmapsInMgmtClusters).To(o.ContainSubstring("hc-nodepool-vmdratio"))
exutil.By("Ge the default nodepool in hosted cluster as first nodepool")
firstNodePoolName = getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(firstNodePoolName).NotTo(o.BeEmpty())
exutil.By("Ge the default nodepool in hosted cluster as second nodepool")
secondNodePoolName = getNodePoolNamebyHostedClusterName(oc, guestClusterName2, hostedClusterNS2)
o.Expect(secondNodePoolName).NotTo(o.BeEmpty())
exutil.By("Pick one worker node in default node pool of first hosted cluster")
oc.SetGuestKubeconf(guestClusterKube)
workerNodeNameInFirstNodepool, err := exutil.GetFirstWorkerNodeByNodePoolNameInHostedCluster(oc, firstNodePoolName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(workerNodeNameInFirstNodepool).NotTo(o.BeEmpty())
e2e.Logf("Worker node in nodepool in first hosted cluster: %v", workerNodeNameInFirstNodepool)
defer assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeNameInFirstNodepool, "openshift-node")
defer oc.SetGuestKubeconf(guestClusterKube)
oc.SetGuestKubeconf(guestClusterKube2)
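// oc.SetGuestKubeconf only switches the guest-cluster client used by the *InHostedCluster
// helpers between the two hosted clusters; management-cluster operations issued via
// oc.AsAdmin().WithoutNamespace() are unaffected by which guest kubeconfig is active.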
exutil.By("Pick one worker node in default node pool of second hosted cluster")
workerNodeNameInSecondNodepool, err := exutil.GetFirstWorkerNodeByNodePoolNameInHostedCluster(oc, secondNodePoolName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(workerNodeNameInSecondNodepool).NotTo(o.BeEmpty())
e2e.Logf("Worker node in nodepool in second hosted cluster: %v", workerNodeNameInSecondNodepool)
defer assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc.SetGuestKubeconf(guestClusterKube2), ntoNamespace, workerNodeNameInSecondNodepool, "openshift-node")
defer oc.SetGuestKubeconf(guestClusterKube2)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+firstNodePoolName, "-n", guestClusterNS, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+secondNodePoolName, "-n", guestClusterNS2, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", firstNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", secondNodePoolName, "-n", hostedClusterNS2, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
//Apply the tuned profile in first nodepool {firstNodePoolName}
exutil.By("Apply the tuned profile in default nodepool {firstNodePoolName} in management cluster")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", firstNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[{\"name\": \"hc-nodepool-vmdratio\"}]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetGuestKubeconf(guestClusterKube)
exutil.By("Check if the configmap tuned-{firstNodePoolName} created in corresponding hosted ns in management cluster")
configMaps := getTuningConfigMapNameWithRetry(oc, guestClusterNS, "tuned-"+firstNodePoolName)
o.Expect(configMaps).NotTo(o.BeEmpty())
o.Expect(configMaps).To(o.ContainSubstring("tuned-" + firstNodePoolName))
exutil.By("Check if the tuned hc-nodepool-vmdratio-xxxxxx is created in hosted cluster nodepool")
AssertIfTunedIsReadyByNameInHostedCluster(oc, "hc-nodepool-vmdratio", ntoNamespace)
exutil.By("Get the tuned pod name that running on first custom nodepool worker node")
tunedPodNameInFirstNodePool, err := exutil.GetPodNameInHostedCluster(oc, ntoNamespace, "", workerNodeNameInFirstNodepool)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedPodNameInFirstNodePool).NotTo(o.BeEmpty())
e2e.Logf("Tuned Pod: %v", tunedPodNameInFirstNodePool)
exutil.By("Check if the tuned profile applied to first custom nodepool worker nodes")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeNameInFirstNodepool, "hc-nodepool-vmdratio")
exutil.By("Check if the tuned profile applied to all worker node in the first nodepool.")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "hc-nodepool-vmdratio")
exutil.By("Assert recommended profile (hc-nodepool-vmdratio) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInFirstNodePool, "12", 300, `recommended profile \(hc-nodepool-vmdratio\) matches current configuration|static tuning from profile 'hc-nodepool-vmdratio' applied`)
exutil.By("Check if the setting of sysctl vm.dirty_ratio applied to worker nodes in first hosted cluster, expected value is 56")
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, firstNodePoolName, "sysctl", "vm.dirty_ratio", "56")
//Compare the sysctl vm.dirty_ratio not equal to 56
exutil.By("Set second kubeconfig to access second hosted cluster")
oc.SetGuestKubeconf(guestClusterKube2)
exutil.By("Check if the setting of sysctl vm.dirty_ratio shouldn't applied to worker nodes in the second nodepool, expected value is default value, not equal 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "vm.dirty_ratio", "56")
//Apply the tuned profile in second nodepool
exutil.By("Apply the tuned profile in second nodepool of second hosted cluster in management cluster")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", secondNodePoolName, "-n", hostedClusterNS2, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[{\"name\": \"hc-nodepool-vmdratio\"}]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
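// The default nodepool of the second hosted cluster is pointed at the same hc-nodepool-vmdratio
// ConfigMap, so both hosted clusters are expected to converge on the same profile and the same
// vm.dirty_ratio value, which the checks below confirm.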
exutil.By("Check if the tuned hc-nodepool-vmdratio-xxxxxx is created in hosted cluster nodepool")
AssertIfTunedIsReadyByNameInHostedCluster(oc, "hc-nodepool-vmdratio", ntoNamespace)
exutil.By("Get the tuned pod name that running on first custom nodepool worker node")
tunedPodNameInSecondNodePool, err := exutil.GetPodNameInHostedCluster(oc, ntoNamespace, "", workerNodeNameInSecondNodepool)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedPodNameInSecondNodePool).NotTo(o.BeEmpty())
e2e.Logf("Tuned Pod: %v", tunedPodNameInSecondNodePool)
exutil.By("Check if the tuned profile applied to second nodepool worker nodes in second hosted cluster")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeNameInSecondNodepool, "hc-nodepool-vmdratio")
exutil.By("Check if the tuned profile applied to all worker node in in second hosted cluster.")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "hc-nodepool-vmdratio")
exutil.By("Assert recommended profile (hc-nodepool-vmdratio) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInSecondNodePool, "12", 300, `recommended profile \(hc-nodepool-vmdratio\) matches current configuration|static tuning from profile 'hc-nodepool-vmdratio' applied`)
exutil.By("Check if the setting of sysctl vm.dirty_ratio applied to worker nodes of default nodepool in second hosted cluster, expected value is 56")
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Remove the custom tuned profile from the nodepool in first hosted cluster ...")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", firstNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Set first kubeconfig to access first hosted cluster")
oc.SetGuestKubeconf(guestClusterKube)
//Compare the sysctl vm.dirty_ratio not equal to 56
exutil.By("Check if the setting of sysctl vm.dirty_ratio shouldn't applied to worker nodes in the first hosted cluster, expected value is default value, not equal 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Set second kubeconfig to access second hosted cluster")
oc.SetGuestKubeconf(guestClusterKube2)
exutil.By("Check if the setting of sysctl vm.dirty_ratio applied to worker nodes in the second hosted cluster, no impact with removing vm.dirty_ratio setting in first hosted cluster")
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Remove the custom tuned profile from the nodepool in second hosted cluster ...")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", secondNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Set first kubeconfig to access first hosted cluster")
oc.SetGuestKubeconf(guestClusterKube)
//Compare the sysctl vm.dirty_ratio not equal to 56
exutil.By("Check if the setting of sysctl vm.dirty_ratio shouldn't applied to worker nodes in the first hosted cluster, expected value is default value, not equal 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Set second kubeconfig to access second hosted cluster")
oc.SetGuestKubeconf(guestClusterKube2)
//Compare the sysctl vm.dirty_ratio not equal to 56
exutil.By("Check if the setting of sysctl vm.dirty_ratio shouldn't applied to worker nodes in the second hosted cluster, expected value is default value, not equal 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "vm.dirty_ratio", "56")
//Clean up all remaining resources/settings
exutil.By("Remove configmap from management cluster")
oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-vmdratio", "-n", hostedClusterNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+firstNodePoolName, "-n", guestClusterNS, "--ignore-not-found").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+secondNodePoolName, "-n", guestClusterNS2, "--ignore-not-found").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Set first kubeconfig to access first hosted cluster")
oc.SetGuestKubeconf(guestClusterKube)
exutil.By("Assert recommended profile (openshift-node) matches current configuration in tuned pod log in first hosted")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInFirstNodePool, "12", 300, `recommended profile \(openshift-node\) matches current configuration|static tuning from profile 'openshift-node' applied`)
exutil.By("Check if the custom tuned profile removed from worker nodes of nodepool in first hosted cluster, default openshift-node applied to worker node")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "openshift-node")
exutil.By("Set second kubeconfig to access second hosted cluster")
oc.SetGuestKubeconf(guestClusterKube2)
exutil.By("Assert recommended profile (openshift-node) matches current configuration in tuned pod log in second hosted")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInSecondNodePool, "12", 300, `recommended profile \(openshift-node\) matches current configuration|static tuning from profile 'openshift-node' applied`)
exutil.By("Check if the custom tuned profile removed from worker nodes of nodepool in second hosted cluster, default openshift-node applied to worker node")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "openshift-node")
})
g.It("NonPreRelease-Longduration-HyperShiftMGMT-Author:liqcui-Medium-53883-NTO can apply different tunings to two separated hosted clusters on hypershift. [Disruptive]", func() {
//Second Hosted Cluster
guestClusterName2, guestClusterKube2, hostedClusterNS2 = exutil.ValidHypershiftAndGetGuestKubeConf4SecondHostedCluster(oc)
e2e.Logf("%s, %s, %s", guestClusterName2, guestClusterKube2, hostedClusterNS2)
guestClusterNS2 = hostedClusterNS2 + "-" + guestClusterName2
e2e.Logf("HostedClusterControlPlaneNS: %v", guestClusterNS2)
// ensure NTO operator is installed
isNTO2 = isHyperNTOPodInstalled(oc, guestClusterNS2)
// test requires NTO to be installed
if !isNTO || !isNTO2 {
g.Skip("NTO is not installed - skipping test ...")
}
supportPlatforms := []string{"aws", "azure"}
if !implStringArrayContains(supportPlatforms, iaasPlatform) {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
//Delete configmap in clusters namespace
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-vmdratio", "-n", hostedClusterNS, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-pidmax", "-n", hostedClusterNS2, "--ignore-not-found").Execute()
//Create the configmaps; they will create custom tuned profiles based on their content
exutil.By("Create configmap hc-nodepool-vmdratio in management cluster")
exutil.ApplyNsResourceFromTemplate(oc, hostedClusterNS, "--ignore-unknown-parameters=true", "-f", tunedWithNodeLevelProfileName, "-p", "TUNEDPROFILENAME=hc-nodepool-vmdratio", "SYSCTLPARM=vm.dirty_ratio", "SYSCTLVALUE=56", "PRIORITY=20", "INCLUDE=openshift-node")
configmapsInMgmtClusters, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", hostedClusterNS).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(configmapsInMgmtClusters).NotTo(o.BeEmpty())
o.Expect(configmapsInMgmtClusters).To(o.ContainSubstring("hc-nodepool-vmdratio"))
exutil.By("Create configmap hc-nodepool-pidmax in management cluster")
exutil.ApplyNsResourceFromTemplate(oc, hostedClusterNS, "--ignore-unknown-parameters=true", "-f", tunedWithNodeLevelProfileName, "-p", "TUNEDPROFILENAME=hc-nodepool-pidmax", "SYSCTLPARM=kernel.pid_max", "SYSCTLVALUE=868686", "PRIORITY=20", "INCLUDE=openshift-node")
configmapsInMgmtClusters, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", hostedClusterNS).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(configmapsInMgmtClusters).NotTo(o.BeEmpty())
o.Expect(configmapsInMgmtClusters).To(o.ContainSubstring("hc-nodepool-pidmax"))
exutil.By("Ge the default nodepool in hosted cluster as first nodepool")
firstNodePoolName = getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(firstNodePoolName).NotTo(o.BeEmpty())
exutil.By("Ge the default nodepool in hosted cluster as second nodepool")
secondNodePoolName = getNodePoolNamebyHostedClusterName(oc, guestClusterName2, hostedClusterNS2)
o.Expect(secondNodePoolName).NotTo(o.BeEmpty())
exutil.By("Pick one worker node in default node pool of first hosted cluster")
oc.SetGuestKubeconf(guestClusterKube)
workerNodeNameInFirstNodepool, err := exutil.GetFirstWorkerNodeByNodePoolNameInHostedCluster(oc, firstNodePoolName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(workerNodeNameInFirstNodepool).NotTo(o.BeEmpty())
e2e.Logf("Worker node in nodepool in first hosted cluster: %v", workerNodeNameInFirstNodepool)
defer assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeNameInFirstNodepool, "openshift-node")
defer oc.SetGuestKubeconf(guestClusterKube)
oc.SetGuestKubeconf(guestClusterKube2)
exutil.By("Pick one worker node in default node pool of second hosted cluster")
workerNodeNameInSecondNodepool, err := exutil.GetFirstWorkerNodeByNodePoolNameInHostedCluster(oc, secondNodePoolName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(workerNodeNameInSecondNodepool).NotTo(o.BeEmpty())
e2e.Logf("Worker node in nodepool in second hosted cluster: %v", workerNodeNameInSecondNodepool)
defer assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeNameInSecondNodepool, "openshift-node")
defer oc.SetGuestKubeconf(guestClusterKube2)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+firstNodePoolName, "-n", guestClusterNS, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+secondNodePoolName, "-n", guestClusterNS2, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", firstNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", secondNodePoolName, "-n", hostedClusterNS2, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
//Apply the tuned profile in first nodepool {firstNodePoolName}
exutil.By("Apply the tuned profile in default nodepool {firstNodePoolName} in management cluster")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", firstNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[{\"name\": \"hc-nodepool-vmdratio\"}]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetGuestKubeconf(guestClusterKube)
exutil.By("Check if the configmap tuned-{firstNodePoolName} created in corresponding hosted ns in management cluster")
configMaps := getTuningConfigMapNameWithRetry(oc, guestClusterNS, "tuned-"+firstNodePoolName)
o.Expect(configMaps).NotTo(o.BeEmpty())
o.Expect(configMaps).To(o.ContainSubstring("tuned-" + firstNodePoolName))
exutil.By("Check if the tuned hc-nodepool-vmdratio-xxxxxx is created in hosted cluster nodepool")
AssertIfTunedIsReadyByNameInHostedCluster(oc, "hc-nodepool-vmdratio", ntoNamespace)
exutil.By("Get the tuned pod name that running on first custom nodepool worker node")
tunedPodNameInFirstNodePool, err := exutil.GetPodNameInHostedCluster(oc, ntoNamespace, "", workerNodeNameInFirstNodepool)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedPodNameInFirstNodePool).NotTo(o.BeEmpty())
e2e.Logf("Tuned Pod: %v", tunedPodNameInFirstNodePool)
exutil.By("Check if the tuned profile applied to first custom nodepool worker nodes")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeNameInFirstNodepool, "hc-nodepool-vmdratio")
exutil.By("Check if the tuned profile applied to all worker node in the first nodepool.")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "hc-nodepool-vmdratio")
exutil.By("Assert recommended profile (hc-nodepool-vmdratio) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInFirstNodePool, "12", 300, `recommended profile \(hc-nodepool-vmdratio\) matches current configuration|static tuning from profile 'hc-nodepool-vmdratio' applied`)
exutil.By("Check if the setting of sysctl vm.dirty_ratio applied to worker nodes in first hosted cluster, expected value is 56")
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, firstNodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Check if the setting of sysctl kernel.pid_max applies default settings on worker nodes in the first hosted cluster, expected value is default value, not equal 868686")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "sysctl", "kernel.pid_max", "868686")
//Compare the sysctl vm.dirty_ratio not equal to 56
exutil.By("Set second kubeconfig to access second hosted cluster")
oc.SetGuestKubeconf(guestClusterKube2)
exutil.By("Check if the setting of sysctl vm.dirty_ratio applies default settings on worker nodes in the second hosted cluster, expected value is default value, not equal 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Check if the setting of sysctl kernel.pid_max applies default settings on worker nodes in the second hosted cluster, expected value is default value, not equal 868686")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "kernel.pid_max", "868686")
//Apply the tuned profile in second nodepool
exutil.By("Apply the tuned profile in second nodepool of second hosted cluster in management cluster")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", secondNodePoolName, "-n", hostedClusterNS2, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[{\"name\": \"hc-nodepool-pidmax\"}]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
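// From here each hosted cluster consumes a different tuning ConfigMap: the first keeps
// hc-nodepool-vmdratio (vm.dirty_ratio=56) and the second gets hc-nodepool-pidmax
// (kernel.pid_max=868686). The cross-checks below prove that neither setting leaks into the
// other hosted cluster.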
exutil.By("Check if the tuned hc-nodepool-pidmax-xxxxxx is created in hosted cluster nodepool")
AssertIfTunedIsReadyByNameInHostedCluster(oc, "hc-nodepool-pidmax", ntoNamespace)
exutil.By("Get the tuned pod name that running on first custom nodepool worker node")
tunedPodNameInSecondNodePool, err := exutil.GetPodNameInHostedCluster(oc, ntoNamespace, "", workerNodeNameInSecondNodepool)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedPodNameInSecondNodePool).NotTo(o.BeEmpty())
e2e.Logf("Tuned Pod: %v", tunedPodNameInSecondNodePool)
exutil.By("Check if the tuned profile applied to second nodepool worker nodes in second hosted cluster")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeNameInSecondNodepool, "hc-nodepool-pidmax")
exutil.By("Check if the tuned profile applied to all worker node in in second hosted cluster.")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "hc-nodepool-pidmax")
exutil.By("Assert recommended profile (hc-nodepool-pidmax) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInSecondNodePool, "12", 300, `recommended profile \(hc-nodepool-pidmax\) matches current configuration|static tuning from profile 'hc-nodepool-pidmax' applied`)
exutil.By("Check if the setting of sysctl kernel.pid_max applies on worker nodes in second hosted cluster, expected value is 868686")
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "kernel.pid_max", "868686")
exutil.By("Check if the setting of sysctl vm.dirty_ratio applies default settings on worker nodes in the second hosted cluster, expected value is default value, not equal 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Set first kubeconfig to access first hosted cluster")
oc.SetGuestKubeconf(guestClusterKube)
exutil.By("Check if the setting of sysctl kernel.pid_max applies default settings on worker nodes in the first hosted cluster, expected value is default value, not equal 868686")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "sysctl", "kernel.pid_max", "868686")
exutil.By("Remove the custom tuned profile from the nodepool in first hosted cluster ...")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", firstNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
//Compare the sysctl vm.dirty_ratio not equal to 56
exutil.By("Check if the setting of sysctl vm.dirty_ratio shouldn't apply to worker nodes in the first hosted cluster, expected value is default value, not equal 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Set second kubeconfig to access second hosted cluster")
oc.SetGuestKubeconf(guestClusterKube2)
exutil.By("Check if the setting of sysctl kernel.pid_max still apply to worker nodes in the second hosted cluster, no impact with removing vm.dirty_ratio setting in first hosted cluster")
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "kernel.pid_max", "868686")
exutil.By("Remove the custom tuned profile from the nodepool in second hosted cluster ...")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", secondNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Set first kubeconfig to access first hosted cluster")
oc.SetGuestKubeconf(guestClusterKube)
exutil.By("All settings of vm.dirty_ratio and kernel.pid_max rollback to default settings ...")
exutil.By("Check if the setting of sysctl vm.dirty_ratio applies default settings on worker nodes in the first hosted cluster, expected value is default value, not equal 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Check if the setting of sysctl kernel.pid_max applies default settings on worker nodes in the first hosted cluster, expected value is default value, not equal 868686")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "sysctl", "kernel.pid_max", "868686")
exutil.By("Set second kubeconfig to access second hosted cluster")
oc.SetGuestKubeconf(guestClusterKube2)
exutil.By("Check if the setting of sysctl vm.dirty_ratio applies default settings on worker nodes in the second hosted cluster, expected value is default value, not equal 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Check if the setting of sysctl kernel.pid_max applies default settings on worker nodes in the second hosted cluster, expected value is default value, not equal 868686")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "kernel.pid_max", "868686")
//Clean up all remaining resources/settings
exutil.By("Remove configmap from management cluster")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-vmdratio", "-n", hostedClusterNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-pidmax", "-n", hostedClusterNS2).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+firstNodePoolName, "-n", guestClusterNS, "--ignore-not-found").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+secondNodePoolName, "-n", guestClusterNS2, "--ignore-not-found").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Set first kubeconfig to access first hosted cluster")
oc.SetGuestKubeconf(guestClusterKube)
exutil.By("Assert recommended profile (openshift-node) matches current configuration in tuned pod log in first hosted")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInFirstNodePool, "12", 300, `recommended profile \(openshift-node\) matches current configuration|static tuning from profile 'openshift-node' applied`)
exutil.By("Check if the custom tuned profile removed from worker nodes of nodepool in first hosted cluster, default openshift-node applied to worker node")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "openshift-node")
exutil.By("Set second kubeconfig to access second hosted cluster")
oc.SetGuestKubeconf(guestClusterKube2)
exutil.By("Assert recommended profile (openshift-node) matches current configuration in tuned pod log in second hosted")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInSecondNodePool, "12", 300, `recommended profile \(openshift-node\) matches current configuration|static tuning from profile 'openshift-node' applied`)
exutil.By("Check if the custom tuned profile removed from worker nodes of nodepool in second hosted cluster, default openshift-node applied to worker node")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "openshift-node")
})
})
|
package hypernto
| ||||
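The teardown above repeats the same merge patch for every nodepool it cleans up. Below is a minimal sketch of a helper that wraps that pattern; the function name clearNodePoolTuningConfig is hypothetical and not part of this repository, while the oc invocation mirrors the patch calls used in the test.

// clearNodePoolTuningConfig removes all tuningConfig references from a
// nodepool by applying an empty list, mirroring the merge patch used in the
// teardown above. The helper itself is a sketch, not repository code.
func clearNodePoolTuningConfig(oc *exutil.CLI, nodePoolName, hostedClusterNS string) error {
	return oc.AsAdmin().WithoutNamespace().Run("patch").Args(
		"nodepool", nodePoolName,
		"-n", hostedClusterNS,
		"--type", "merge",
		"-p", `{"spec":{"tuningConfig":[]}}`).Execute()
}

A call such as o.Expect(clearNodePoolTuningConfig(oc, firstNodePoolName, hostedClusterNS)).NotTo(o.HaveOccurred()) would replace each inline patch/expect pair.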
test case
|
openshift/openshift-tests-private
|
58a07e52-7bfa-4120-b414-083542c7f07c
|
HyperShiftMGMT-Author:liqcui-Medium-53875-NTO Support profile that have the same name with tuned on hypershift [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/psap/hypernto/hypernto.go
|
g.It("HyperShiftMGMT-Author:liqcui-Medium-53875-NTO Support profile that have the same name with tuned on hypershift [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
supportPlatforms := []string{"aws", "azure", "aks"}
if !implStringArrayContains(supportPlatforms, iaasPlatform) {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
//Delete configmap in clusters namespace
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-pidmax", "-n", hostedClusterNS, "--ignore-not-found").Execute()
//Create configmap, it will create custom tuned profile based on this configmap
exutil.By("Create configmap hc-nodepool-pidmax in management cluster")
exutil.ApplyOperatorResourceByYaml(oc, hostedClusterNS, tunedWithSameProfileNameAKSPidmax)
configmapsInMgmtClusters, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", hostedClusterNS).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(configmapsInMgmtClusters).NotTo(o.BeEmpty())
o.Expect(configmapsInMgmtClusters).To(o.ContainSubstring("hc-nodepool-pidmax"))
//Apply tuned profile to hosted clusters
exutil.By("Apply tunedCconfig hc-nodepool-pidmax in hosted cluster nodepool")
nodePoolName := getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(nodePoolName).NotTo(o.BeEmpty())
exutil.By("Pick one worker node in hosted cluster, this worker node will be labeled with hc-nodepool-pidmax=")
workerNodeName, err := exutil.GetFirstLinuxWorkerNodeInHostedCluster(oc)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(workerNodeName).NotTo(o.BeEmpty())
e2e.Logf("Worker Node: %v", workerNodeName)
//Delete configmap in hosted cluster namespace and disable tuningConfig
defer assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeName, "openshift-node")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+nodePoolName, "-n", guestClusterNS, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", nodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
//Enable tuned in hosted clusters
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", nodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[{\"name\": \"hc-nodepool-pidmax\"}]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check if the configmap hc-nodepool-pidmax created in hosted cluster nodepool")
configMaps := getTuningConfigMapNameWithRetry(oc, guestClusterNS, nodePoolName)
o.Expect(configMaps).To(o.ContainSubstring("tuned-" + nodePoolName))
exutil.By("Check if the tuned hc-nodepool-pidmax is created in hosted cluster nodepool")
tunedNameList, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("tuned", "-n", ntoNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNameList).NotTo(o.BeEmpty())
e2e.Logf("The list of tuned tunedNameList is: \n%v", tunedNameList)
o.Expect(tunedNameList).To(o.ContainSubstring("hc-nodepool-pidmax"))
exutil.By("Get the tuned pod name that running on labeled node with hc-nodepool-pidmax=")
tunedPodName, err := exutil.GetPodNameInHostedCluster(oc, ntoNamespace, "", workerNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedPodName).NotTo(o.BeEmpty())
e2e.Logf("Tuned Pod: %v", tunedPodName)
exutil.By("Label the worker nodes with hc-nodepool-pidmax=")
defer oc.AsAdmin().AsGuestKubeconf().Run("label").Args("node", workerNodeName, "hc-nodepool-pidmax-").Execute()
err = oc.AsAdmin().AsGuestKubeconf().Run("label").Args("node", workerNodeName, "hc-nodepool-pidmax=").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check if the tuned profile applied to labeled worker nodes with hc-nodepool-pidmax=")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeName, "hc-nodepool-pidmax")
exutil.By("Assert recommended profile (hc-nodepool-pidmax) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodName, "12", 300, `recommended profile \(hc-nodepool-pidmax\) matches current configuration|static tuning from profile 'hc-nodepool-pidmax' applied`)
exutil.By("Check if the setting of sysctl kernel.pid_max applied to labeled worker nodes, expected value is 868686")
compareSpecifiedValueByNameOnLabelNodeWithRetryInHostedCluster(oc, ntoNamespace, workerNodeName, "sysctl", "kernel.pid_max", "868686")
exutil.By("Remove the custom tuned profile from node pool in hosted cluster ...")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", nodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
//Remove custom tuned profile to check if kernel.pid_max rolls back to its original value
exutil.By("Remove configmap from management cluster")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-pidmax", "-n", hostedClusterNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+nodePoolName, "-n", guestClusterNS, "--ignore-not-found").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Assert recommended profile (openshift-node) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodName, "12", 300, `recommended profile \(openshift-node\) matches current configuration|static tuning from profile 'openshift-node' applied`)
exutil.By("Check if the custom tuned profile removed from labeled worker nodes, default openshift-node applied to worker node")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeName, "openshift-node")
pidMaxValue := getTunedSystemSetValueByParamNameInHostedCluster(oc, ntoNamespace, workerNodeName, "sysctl", "kernel.pid_max")
o.Expect(pidMaxValue).NotTo(o.BeEmpty())
o.Expect(pidMaxValue).NotTo(o.ContainSubstring("868686"))
})
| ||||||
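The fixture referenced above (tunedWithSameProfileNameAKSPidmax) is not included in this catalog. The sketch below is an assumption of its shape, reconstructed from the names and the kernel.pid_max value the test asserts; the priority, summary and include line are guesses, and only the name hc-nodepool-pidmax and the sysctl value 868686 come from the test itself.

// Assumed shape of the ConfigMap-wrapped Tuned whose embedded profile shares
// the name of the Tuned object (hc-nodepool-pidmax). Everything except the
// names and kernel.pid_max=868686 is an assumption.
const assumedSameNamePidmaxTuned = `
apiVersion: v1
kind: ConfigMap
metadata:
  name: hc-nodepool-pidmax
data:
  tuning: |
    apiVersion: tuned.openshift.io/v1
    kind: Tuned
    metadata:
      name: hc-nodepool-pidmax
      namespace: openshift-cluster-node-tuning-operator
    spec:
      profile:
      - name: hc-nodepool-pidmax
        data: |
          [main]
          summary=Custom kernel.pid_max setting
          include=openshift-node
          [sysctl]
          kernel.pid_max=868686
      recommend:
      - match:
        - label: hc-nodepool-pidmax
        priority: 20
        profile: hc-nodepool-pidmax
`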
test case
|
openshift/openshift-tests-private
|
6f85aa68-7d22-4202-a858-5e7645160c10
|
HyperShiftMGMT-Author:liqcui-Medium-53876-NTO Operand logs errors when applying profile with invalid settings in HyperShift. [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/psap/hypernto/hypernto.go
|
g.It("HyperShiftMGMT-Author:liqcui-Medium-53876-NTO Operand logs errors when applying profile with invalid settings in HyperShift. [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
supportPlatforms := []string{"aws", "azure", "aks"}
if !implStringArrayContains(supportPlatforms, iaasPlatform) {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
//Delete configmap in hostedClusterNS namespace
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-invalid", "-n", hostedClusterNS, "--ignore-not-found").Execute()
//Create configmap, it will create custom tuned profile based on this configmap
exutil.By("Create configmap hc-nodepool-invalid in management cluster")
exutil.ApplyOperatorResourceByYaml(oc, hostedClusterNS, tunedWithInvalidProfileName)
configmapsInMgmtClusters, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", hostedClusterNS).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(configmapsInMgmtClusters).NotTo(o.BeEmpty())
o.Expect(configmapsInMgmtClusters).To(o.ContainSubstring("hc-nodepool-invalid"))
//Apply tuned profile to hosted clusters
exutil.By("Apply tunedCconfig hc-nodepool-invalid in hosted cluster nodepool")
nodePoolName := getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(nodePoolName).NotTo(o.BeEmpty())
exutil.By("Pick one worker node in hosted cluster, this worker node will be labeled with hc-nodepool-invalid=")
workerNodeName, err := exutil.GetFirstLinuxWorkerNodeInHostedCluster(oc)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(workerNodeName).NotTo(o.BeEmpty())
e2e.Logf("Worker Node: %v", workerNodeName)
//Delete configmap in hosted cluster namespace and disable tuningConfig
defer assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeName, "openshift-node")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+nodePoolName, "-n", guestClusterNS, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", nodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
//Enable tuned in hosted clusters
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", nodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[{\"name\": \"hc-nodepool-invalid\"}]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check if the configmap hc-nodepool-invalid created in hosted cluster nodepool")
configMaps := getTuningConfigMapNameWithRetry(oc, guestClusterNS, nodePoolName)
o.Expect(configMaps).To(o.ContainSubstring("tuned-" + nodePoolName))
exutil.By("Check if the tuned hc-nodepool-invalid is created in hosted cluster nodepool")
tunedNameList, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("tuned", "-n", ntoNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNameList).NotTo(o.BeEmpty())
e2e.Logf("The list of tuned tunedNameList is: \n%v", tunedNameList)
o.Expect(tunedNameList).To(o.ContainSubstring("hc-nodepool-invalid"))
exutil.By("Get the tuned pod name that running on labeled node with hc-nodepool-invalid=")
tunedPodName, err := exutil.GetPodNameInHostedCluster(oc, ntoNamespace, "", workerNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedPodName).NotTo(o.BeEmpty())
e2e.Logf("Tuned Pod: %v", tunedPodName)
exutil.By("Label the worker nodes with hc-nodepool-invalid=")
defer oc.AsAdmin().AsGuestKubeconf().Run("label").Args("node", workerNodeName, "hc-nodepool-invalid-").Execute()
err = oc.AsAdmin().AsGuestKubeconf().Run("label").Args("node", workerNodeName, "hc-nodepool-invalid=").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check if the tuned profile applied to labeled worker nodes with hc-nodepool-invalid=")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeName, "hc-nodepool-invalid")
exutil.By("Assert recommended profile (hc-nodepool-invalid) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodName, "12", 300, `recommended profile \(hc-nodepool-invalid\) matches current configuration|static tuning from profile 'hc-nodepool-invalid' applied`)
exutil.By("Assert Failed to read sysctl parameter in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodName, "20", 300, `failed to read the original value|sysctl option kernel.pid_maxinvalid will not be set`)
expectedDegradedStatus, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io", workerNodeName, `-ojsonpath='{.status.conditions[?(@.type=="Degraded")].status}'`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(expectedDegradedStatus).NotTo(o.BeEmpty())
o.Expect(expectedDegradedStatus).To(o.ContainSubstring("True"))
exutil.By("Check if the setting of sysctl kernel.pid_max applied to labeled worker nodes, expected value is 868686")
compareSpecifiedValueByNameOnLabelNodeWithRetryInHostedCluster(oc, ntoNamespace, workerNodeName, "sysctl", "kernel.pid_max", "868686")
exutil.By("Check if the setting of sysctl vm.dirty_ratio applied to labeled worker nodes, expected value is 56")
compareSpecifiedValueByNameOnLabelNodeWithRetryInHostedCluster(oc, ntoNamespace, workerNodeName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Remove the custom tuned profile from node pool in hosted cluster ...")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", nodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[{\"name\": \"\"}]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
//Remove custom tuned profile to check if kernel.pid_max and vm.dirty_ratio roll back to their original values
exutil.By("Remove configmap from management cluster")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-invalid", "-n", hostedClusterNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+nodePoolName, "-n", guestClusterNS, "--ignore-not-found").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Assert recommended profile (openshift-node) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodName, "12", 300, `recommended profile \(openshift-node\) matches current configuration|static tuning from profile 'openshift-node' applied`)
exutil.By("Check if the custom tuned profile removed from labeled worker nodes, default openshift-node applied to worker node")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeName, "openshift-node")
pidMaxValue := getTunedSystemSetValueByParamNameInHostedCluster(oc, ntoNamespace, workerNodeName, "sysctl", "kernel.pid_max")
o.Expect(pidMaxValue).NotTo(o.BeEmpty())
o.Expect(pidMaxValue).NotTo(o.ContainSubstring("868686"))
vmDirtyRatioValue := getTunedSystemSetValueByParamNameInHostedCluster(oc, ntoNamespace, workerNodeName, "sysctl", "vm.dirty_ratio")
o.Expect(vmDirtyRatioValue).NotTo(o.BeEmpty())
o.Expect(vmDirtyRatioValue).NotTo(o.ContainSubstring("56"))
})
| ||||||
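The Degraded condition above is read once with a plain get. A hedged sketch of how that check could be retried is shown below; the function name, the poll interval and the use of k8s.io/apimachinery/pkg/util/wait are assumptions for illustration, while the jsonpath query and the expected "True" value come from the test.

// waitForProfileDegraded polls the tuned Profile of a node in the hosted
// cluster until its Degraded condition reports True, or the timeout expires.
// This is a sketch, not repository code.
func waitForProfileDegraded(oc *exutil.CLI, ntoNamespace, nodeName string, timeout time.Duration) error {
	return wait.PollImmediate(10*time.Second, timeout, func() (bool, error) {
		status, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args(
			"-n", ntoNamespace, "profiles.tuned.openshift.io", nodeName,
			`-ojsonpath={.status.conditions[?(@.type=="Degraded")].status}`).Output()
		if err != nil {
			// Treat transient API errors as retryable.
			return false, nil
		}
		return strings.Contains(status, "True"), nil
	})
}

The imports this sketch needs (time, strings, k8s.io/apimachinery/pkg/util/wait) are assumed to be available in the test file.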
test case
|
openshift/openshift-tests-private
|
9de159ed-d1b0-44f1-822e-e2ce1a9ac3aa
|
HyperShiftMGMT-Author:liqcui-Medium-53877-NTO support tuning sysctl that applied to all nodes of nodepool-level settings in hypershift. [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/psap/hypernto/hypernto.go
|
g.It("HyperShiftMGMT-Author:liqcui-Medium-53877-NTO support tuning sysctl that applied to all nodes of nodepool-level settings in hypershift. [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
supportPlatforms := []string{"aws", "azure", "aks"}
if !implStringArrayContains(supportPlatforms, iaasPlatform) {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
//Delete configmap in clusters namespace
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-vmdratio", "-n", hostedClusterNS, "--ignore-not-found").Execute()
//Create configmap, it will create custom tuned profile based on this configmap
exutil.By("Create configmap hc-nodepool-vmdratio in management cluster")
exutil.ApplyOperatorResourceByYaml(oc, hostedClusterNS, tunedWithNodeLevelProfileNameAKSVMRatio)
configmapsInMgmtClusters, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", hostedClusterNS).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(configmapsInMgmtClusters).NotTo(o.BeEmpty())
o.Expect(configmapsInMgmtClusters).To(o.ContainSubstring("hc-nodepool-vmdratio"))
//Apply tuned profile to hosted clusters
exutil.By("Apply tunedCconfig hc-nodepool-vmdratio in hosted cluster nodepool")
nodePoolName := getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(nodePoolName).NotTo(o.BeEmpty())
workerNodeName, err := exutil.GetFirstWorkerNodeByNodePoolNameInHostedCluster(oc, nodePoolName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(workerNodeName).NotTo(o.BeEmpty())
//Delete configmap in hosted cluster namespace and disable tuningConfig
defer assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeName, "openshift-node")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+nodePoolName, "-n", guestClusterNS, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", nodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
//Enable tuned in hosted clusters
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", nodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[{\"name\": \"hc-nodepool-vmdratio\"}]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check if the configmap hc-nodepool-vmdratio created in hosted cluster nodepool")
configMaps := getTuningConfigMapNameWithRetry(oc, guestClusterNS, nodePoolName)
o.Expect(configMaps).To(o.ContainSubstring("tuned-" + nodePoolName))
exutil.By("Check if the tuned hc-nodepool-vmdratio is created in hosted cluster nodepool")
tunedNameList, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("tuned", "-n", ntoNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNameList).NotTo(o.BeEmpty())
e2e.Logf("The list of tuned tunedNameList is: \n%v", tunedNameList)
o.Expect(tunedNameList).To(o.ContainSubstring("hc-nodepool-vmdratio"))
exutil.By("Get the tuned pod name that running on first node of nodepool")
tunedPodName, err := exutil.GetPodNameInHostedCluster(oc, ntoNamespace, "", workerNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedPodName).NotTo(o.BeEmpty())
e2e.Logf("Tuned Pod: %v", tunedPodName)
exutil.By("Check if the tuned profile applied to all worker node in specifed nodepool.")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, nodePoolName, "hc-nodepool-vmdratio")
exutil.By("Assert recommended profile (hc-nodepool-vmdratio) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodName, "12", 300, `recommended profile \(hc-nodepool-vmdratio\) matches current configuration|static tuning from profile 'hc-nodepool-vmdratio' applied`)
exutil.By("Check if the setting of sysctl vm.dirty_ratio applied to labeled worker nodes, expected value is 56")
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, nodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Remove the custom tuned profile from node pool in hosted cluster ...")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", nodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
//Remove custom tuned profile to check if kernel.pid_max and vm.dirty_ratio roll back to their original values
exutil.By("Remove configmap from management cluster")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-vmdratio", "-n", hostedClusterNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+nodePoolName, "-n", guestClusterNS, "--ignore-not-found").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Assert recommended profile (openshift-node) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodName, "12", 300, `recommended profile \(openshift-node\) matches current configuration|static tuning from profile 'openshift-node' applied`)
exutil.By("Check if the custom tuned profile removed from worker nodes of nodepool, default openshift-node applied to worker node")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, nodePoolName, "openshift-node")
exutil.By("The value of vm.dirty_ratio on specified nodepool should not equal to 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, nodePoolName, "sysctl", "vm.dirty_ratio", "56")
})
| ||||||
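The tuningConfig patches above are hand-escaped JSON strings. A small sketch of building the same payloads with fmt.Sprintf follows; the helper name is hypothetical, and the generated strings match the bodies passed to oc patch in these tests.

// tuningConfigPatch builds the merge-patch body used to enable or clear
// tuningConfig references on a nodepool. With no arguments it returns the
// empty-list patch used for cleanup. Sketch only, not repository code.
func tuningConfigPatch(names ...string) string {
	if len(names) == 0 {
		return `{"spec":{"tuningConfig":[]}}`
	}
	refs := make([]string, 0, len(names))
	for _, n := range names {
		refs = append(refs, fmt.Sprintf(`{"name": %q}`, n))
	}
	return fmt.Sprintf(`{"spec":{"tuningConfig":[%s]}}`, strings.Join(refs, ","))
}

For example, tuningConfigPatch("hc-nodepool-vmdratio") produces {"spec":{"tuningConfig":[{"name": "hc-nodepool-vmdratio"}]}}, the same body used in the test above; the fmt and strings imports are assumed.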
test case
|
openshift/openshift-tests-private
|
0eae62f2-0902-4653-b0b2-3a851303337f
|
HyperShiftMGMT-Author:liqcui-Medium-53886-NTO support tuning sysctl with different name that applied to one labeled node of nodepool in hypershift. [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/psap/hypernto/hypernto.go
|
g.It("HyperShiftMGMT-Author:liqcui-Medium-53886-NTO support tuning sysctl with different name that applied to one labeled node of nodepool in hypershift. [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
supportPlatforms := []string{"aws", "azure", "aks"}
if !implStringArrayContains(supportPlatforms, iaasPlatform) {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
//Delete configmap in clusters namespace
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-pidmax-cm", "-n", hostedClusterNS, "--ignore-not-found").Execute()
//Create configmap, it will create custom tuned profile based on this configmap
exutil.By("Create configmap hc-nodepool-pidmax in management cluster")
exutil.ApplyOperatorResourceByYaml(oc, hostedClusterNS, tunedWithDiffProfileNameAKSPidmax)
configmapsInMgmtClusters, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", hostedClusterNS).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(configmapsInMgmtClusters).NotTo(o.BeEmpty())
o.Expect(configmapsInMgmtClusters).To(o.ContainSubstring("hc-nodepool-pidmax-cm"))
//Apply tuned profile to hosted clusters
exutil.By("Apply tunedCconfig hc-nodepool-pidmax in hosted cluster nodepool")
nodePoolName := getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(nodePoolName).NotTo(o.BeEmpty())
exutil.By("Pick one worker node in hosted cluster, this worker node will be labeled with hc-nodepool-pidmax=")
workerNodeName, err := exutil.GetFirstLinuxWorkerNodeInHostedCluster(oc)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(workerNodeName).NotTo(o.BeEmpty())
e2e.Logf("Worker Node: %v", workerNodeName)
//Delete configmap in hosted cluster namespace and disable tuningConfig
defer assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeName, "openshift-node")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+nodePoolName, "-n", guestClusterNS, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", nodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
//Enable tuned in hosted clusters
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", nodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[{\"name\": \"hc-nodepool-pidmax-cm\"}]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check if the configmap hc-nodepool-pidmax created in hosted cluster nodepool")
configMaps := getTuningConfigMapNameWithRetry(oc, guestClusterNS, nodePoolName)
o.Expect(configMaps).To(o.ContainSubstring("tuned-" + nodePoolName))
exutil.By("Check if the tuned hc-nodepool-pidmax is created in hosted cluster nodepool")
tunedNameList, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("tuned", "-n", ntoNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNameList).NotTo(o.BeEmpty())
e2e.Logf("The list of tuned tunedNameList is: \n%v", tunedNameList)
o.Expect(tunedNameList).To(o.ContainSubstring("hc-nodepool-pidmax-tuned"))
exutil.By("Get the tuned pod name that running on labeled node with hc-nodepool-pidmax=")
tunedPodName, err := exutil.GetPodNameInHostedCluster(oc, ntoNamespace, "", workerNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedPodName).NotTo(o.BeEmpty())
e2e.Logf("Tuned Pod: %v", tunedPodName)
exutil.By("Label the worker nodes with hc-nodepool-pidmax=")
defer oc.AsAdmin().AsGuestKubeconf().Run("label").Args("node", workerNodeName, "hc-nodepool-pidmax-").Execute()
err = oc.AsAdmin().AsGuestKubeconf().Run("label").Args("node", workerNodeName, "hc-nodepool-pidmax=").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check if the tuned profile applied to labeled worker nodes with hc-nodepool-pidmax=")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeName, "hc-nodepool-pidmax-profile")
exutil.By("Assert recommended profile (hc-nodepool-pidmax-profile) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodName, "12", 300, `recommended profile \(hc-nodepool-pidmax-profile\) matches current configuration|static tuning from profile 'hc-nodepool-pidmax-profile' applied`)
exutil.By("Check if the setting of sysctl kernel.pid_max applied to labeled worker nodes, expected value is 868686")
compareSpecifiedValueByNameOnLabelNodeWithRetryInHostedCluster(oc, ntoNamespace, workerNodeName, "sysctl", "kernel.pid_max", "868686")
exutil.By("Remove the custom tuned profile from node pool in hosted cluster ...")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", nodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
//Remove custom tuned profile to check if kernel.pid_max rolls back to its original value
exutil.By("Remove configmap from management cluster")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-pidmax-cm", "-n", hostedClusterNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+nodePoolName, "-n", guestClusterNS, "--ignore-not-found").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Assert recommended profile (openshift-node) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodName, "12", 300, `recommended profile \(openshift-node\) matches current configuration|static tuning from profile 'openshift-node' applied`)
exutil.By("Check if the custom tuned profile removed from labeled worker nodes, default openshift-node applied to worker node")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeName, "openshift-node")
pidMaxValue := getTunedSystemSetValueByParamNameInHostedCluster(oc, ntoNamespace, workerNodeName, "sysctl", "kernel.pid_max")
o.Expect(pidMaxValue).NotTo(o.BeEmpty())
o.Expect(pidMaxValue).NotTo(o.ContainSubstring("868686"))
})
| ||||||
test case
|
openshift/openshift-tests-private
|
d7800424-3b0c-45b4-bc5f-8672c5d54058
|
Longduration-NonPreRelease-HyperShiftMGMT-Author:liqcui-Medium-54522-NTO Applying tuning which requires kernel boot parameters. [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/psap/hypernto/hypernto.go
|
g.It("Longduration-NonPreRelease-HyperShiftMGMT-Author:liqcui-Medium-54522-NTO Applying tuning which requires kernel boot parameters. [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
supportPlatforms := []string{"aws", "azure", "aks"}
if !implStringArrayContains(supportPlatforms, iaasPlatform) {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
defer func() {
oc.AsAdmin().WithoutNamespace().Run("delete").Args("nodepool", "hugepages-nodepool", "-n", hostedClusterNS, "--ignore-not-found").Execute()
isMatch := exutil.CheckAllNodepoolReadyByHostedClusterName(oc, "hugepages-nodepool", hostedClusterNS, 300)
o.Expect(isMatch).To(o.Equal(true))
}()
exutil.By("Create custom node pool in hosted cluster")
if iaasPlatform == "aws" {
exutil.CreateCustomNodePoolInHypershift(oc, "aws", guestClusterName, "hugepages-nodepool", "1", "m5.xlarge", "InPlace", hostedClusterNS, "")
} else if iaasPlatform == "azure" {
//Apply tuned profile to hosted clusters
exutil.By("Ge the default nodepool in hosted cluster as secondary nodepool")
defaultNodePoolName := getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(defaultNodePoolName).NotTo(o.BeEmpty())
exutil.CreateCustomNodePoolInHypershift(oc, "azure", guestClusterName, "hugepages-nodepool", "1", "Standard_D4s_v4", "InPlace", hostedClusterNS, defaultNodePoolName)
} else if iaasPlatform == "aks" {
//Apply tuned profile to hosted clusters
exutil.By("Ge the default nodepool in hosted cluster as secondary nodepool")
defaultNodePoolName := getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(defaultNodePoolName).NotTo(o.BeEmpty())
exutil.CreateCustomNodePoolInHypershift(oc, "aks", guestClusterName, "hugepages-nodepool", "1", "Standard_D4s_v4", "InPlace", hostedClusterNS, defaultNodePoolName)
}
exutil.By("Check if custom node pool is ready in hosted cluster")
exutil.AssertIfNodePoolIsReadyByName(oc, "hugepages-nodepool", 900, hostedClusterNS)
//Delete configmap in clusters namespace
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hugepages", "-n", hostedClusterNS, "--ignore-not-found").Execute()
//Create configmap, it will create custom tuned profile based on this configmap
exutil.By("Create configmap tuned-hugepages in management cluster")
exutil.ApplyOperatorResourceByYaml(oc, hostedClusterNS, tunedWithKernelBootProfileName)
configmapsInMgmtClusters, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", hostedClusterNS).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(configmapsInMgmtClusters).NotTo(o.BeEmpty())
o.Expect(configmapsInMgmtClusters).To(o.ContainSubstring("hugepages"))
exutil.By("Pick one worker node in custom node pool of hosted cluster")
workerNodeName, err := exutil.GetFirstWorkerNodeByNodePoolNameInHostedCluster(oc, "hugepages-nodepool")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(workerNodeName).NotTo(o.BeEmpty())
e2e.Logf("Worker Node: %v", workerNodeName)
//Delete configmap in hosted cluster namespace and disable tuningConfig
defer assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeName, "openshift-node")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-hugepages-nodepool", "-n", guestClusterNS, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", "hugepages-nodepool", "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
//Enable tuned in hosted clusters
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", "hugepages-nodepool", "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[{\"name\": \"tuned-hugepages\"}]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check if the configmap tuned-hugepages-nodepool created in corresponding hosted ns in management cluster")
configMaps := getTuningConfigMapNameWithRetry(oc, guestClusterNS, "hugepages-nodepool")
o.Expect(configMaps).To(o.ContainSubstring("hugepages-nodepool"))
exutil.By("Check if the configmap applied to tuned-hugepages-nodepool in management cluster")
exutil.AssertIfNodePoolUpdatingConfigByName(oc, "hugepages-nodepool", 360, hostedClusterNS)
exutil.By("Check if the tuned hugepages-xxxxxx is created in hosted cluster nodepool")
tunedNameList, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("tuned", "-n", ntoNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNameList).NotTo(o.BeEmpty())
e2e.Logf("The list of tuned tunedNameList is: \n%v", tunedNameList)
o.Expect(tunedNameList).To(o.ContainSubstring("hugepages"))
exutil.By("Get the tuned pod name that running on custom node pool worker node")
tunedPodName, err := exutil.GetPodNameInHostedCluster(oc, ntoNamespace, "", workerNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedPodName).NotTo(o.BeEmpty())
e2e.Logf("Tuned Pod: %v", tunedPodName)
exutil.By("Check if the tuned profile applied to custom node pool worker nodes")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeName, "openshift-node-hugepages")
exutil.By("Assert hugepagesz match in /proc/cmdline on the worker node in custom node pool")
assertIfMatchKenelBootOnNodePoolLevelInHostedCluster(oc, ntoNamespace, "hugepages-nodepool", "hugepagesz", true)
exutil.By("Remove the custom tuned profile from node pool in hosted cluster ...")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", "hugepages-nodepool", "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Remove configmap from management cluster")
oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-hugepages", "-n", hostedClusterNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-hugepages", "-n", guestClusterNS, "--ignore-not-found").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check if the worker node is ready after reboot due to removing kernel boot settings")
AssertIfNodeIsReadyByNodeNameInHostedCluster(oc, workerNodeName, 360)
exutil.By("Check if the removed configmap applied to tuned-hugepages-nodepool in management cluster")
exutil.AssertIfNodePoolUpdatingConfigByName(oc, "hugepages-nodepool", 360, hostedClusterNS)
exutil.By("Check if the custom tuned profile removed from labeled worker nodes, default openshift-node applied to worker node")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeName, "openshift-node")
exutil.By("Assert hugepagesz match in /proc/cmdline on the worker node in custom node pool")
assertIfMatchKenelBootOnNodePoolLevelInHostedCluster(oc, ntoNamespace, "hugepages-nodepool", "hugepagesz", false)
})
| ||||||
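The kernel-boot fixture referenced above (tunedWithKernelBootProfileName) is not shown in this catalog. The sketch below is an assumption of its shape: only the ConfigMap name tuned-hugepages, the applied profile name openshift-node-hugepages and the cmdline value hugepagesz=2M hugepages=50 come from the tests; the [bootloader] option name, priority and include line are guesses.

// Assumed shape of the tuned-hugepages ConfigMap that drives the kernel boot
// parameter tests. Field values not asserted by the tests are assumptions.
const assumedHugepagesTuned = `
apiVersion: v1
kind: ConfigMap
metadata:
  name: tuned-hugepages
data:
  tuning: |
    apiVersion: tuned.openshift.io/v1
    kind: Tuned
    metadata:
      name: hugepages
      namespace: openshift-cluster-node-tuning-operator
    spec:
      profile:
      - name: openshift-node-hugepages
        data: |
          [main]
          summary=Boot time configuration for hugepages
          include=openshift-node
          [bootloader]
          cmdline_openshift_node_hugepages=hugepagesz=2M hugepages=50
      recommend:
      - priority: 20
        profile: openshift-node-hugepages
`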
test case
|
openshift/openshift-tests-private
|
6b787fc7-f494-4526-866f-5c9ee4aede52
|
Longduration-NonPreRelease-HyperShiftMGMT-Author:liqcui-Medium-56609-NTO Scale out node pool which applied tuning with required kernel boot. [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/psap/hypernto/hypernto.go
|
g.It("Longduration-NonPreRelease-HyperShiftMGMT-Author:liqcui-Medium-56609-NTO Scale out node pool which applied tuning with required kernel boot. [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
supportPlatforms := []string{"aws", "azure", "aks"}
if !implStringArrayContains(supportPlatforms, iaasPlatform) {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
defer func() {
oc.AsAdmin().WithoutNamespace().Run("delete").Args("nodepool", "hugepages-nodepool", "-n", hostedClusterNS, "--ignore-not-found").Execute()
isMatch := exutil.CheckAllNodepoolReadyByHostedClusterName(oc, "hugepages-nodepool", hostedClusterNS, 300)
o.Expect(isMatch).To(o.Equal(true))
}()
exutil.By("Create custom node pool in hosted cluster")
if iaasPlatform == "aws" {
exutil.CreateCustomNodePoolInHypershift(oc, "aws", guestClusterName, "hugepages-nodepool", "1", "m5.xlarge", "InPlace", hostedClusterNS, "")
} else if iaasPlatform == "azure" {
//Apply tuned profile to hosted clusters
exutil.By("Ge the default nodepool in hosted cluster as secondary nodepool")
defaultNodePoolName := getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(defaultNodePoolName).NotTo(o.BeEmpty())
exutil.CreateCustomNodePoolInHypershift(oc, "azure", guestClusterName, "hugepages-nodepool", "1", "Standard_D4s_v4", "InPlace", hostedClusterNS, defaultNodePoolName)
} else if iaasPlatform == "aks" {
//Apply tuned profile to hosted clusters
exutil.By("Ge the default nodepool in hosted cluster as secondary nodepool")
defaultNodePoolName := getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(defaultNodePoolName).NotTo(o.BeEmpty())
exutil.CreateCustomNodePoolInHypershift(oc, "aks", guestClusterName, "hugepages-nodepool", "1", "Standard_D4s_v4", "InPlace", hostedClusterNS, defaultNodePoolName)
}
exutil.By("Check if custom node pool is ready in hosted cluster")
exutil.AssertIfNodePoolIsReadyByName(oc, "hugepages-nodepool", 720, hostedClusterNS)
//Delete configmap in clusters namespace
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hugepages", "-n", hostedClusterNS, "--ignore-not-found").Execute()
//Create configmap, it will create custom tuned profile based on this configmap
exutil.By("Create configmap tuned-hugepages in management cluster")
exutil.ApplyOperatorResourceByYaml(oc, hostedClusterNS, tunedWithKernelBootProfileName)
configmapsInMgmtClusters, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", hostedClusterNS).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(configmapsInMgmtClusters).NotTo(o.BeEmpty())
o.Expect(configmapsInMgmtClusters).To(o.ContainSubstring("hugepages"))
exutil.By("Pick one worker node in custom node pool of hosted cluster")
workerNodeName, err := exutil.GetFirstWorkerNodeByNodePoolNameInHostedCluster(oc, "hugepages-nodepool")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(workerNodeName).NotTo(o.BeEmpty())
e2e.Logf("Worker Node: %v", workerNodeName)
//Delete configmap in hosted cluster namespace and disable tuningConfig
defer assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeName, "openshift-node")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-hugepages-nodepool", "-n", guestClusterNS, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", "hugepages-nodepool", "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
//Enable tuned in hosted clusters
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", "hugepages-nodepool", "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[{\"name\": \"tuned-hugepages\"}]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check if the configmap tuned-hugepages-nodepool created in corresponding hosted ns in management cluster")
configMaps := getTuningConfigMapNameWithRetry(oc, guestClusterNS, "hugepages-nodepool")
o.Expect(configMaps).To(o.ContainSubstring("hugepages-nodepool"))
exutil.By("Check if the configmap applied to tuned-hugepages-nodepool in management cluster")
exutil.AssertIfNodePoolUpdatingConfigByName(oc, "hugepages-nodepool", 360, hostedClusterNS)
exutil.By("Check if the tuned hugepages-xxxxxx is created in hosted cluster nodepool")
tunedNameList, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("tuned", "-n", ntoNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNameList).NotTo(o.BeEmpty())
e2e.Logf("The list of tuned tunedNameList is: \n%v", tunedNameList)
o.Expect(tunedNameList).To(o.ContainSubstring("hugepages"))
exutil.By("Get the tuned pod name that running on custom node pool worker node")
tunedPodName, err := exutil.GetPodNameInHostedCluster(oc, ntoNamespace, "", workerNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedPodName).NotTo(o.BeEmpty())
e2e.Logf("Tuned Pod: %v", tunedPodName)
exutil.By("Check if the tuned profile applied to custom node pool worker nodes")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeName, "openshift-node-hugepages")
exutil.By("Assert hugepagesz match in /proc/cmdline on the worker node in custom node pool")
assertIfMatchKenelBootOnNodePoolLevelInHostedCluster(oc, ntoNamespace, "hugepages-nodepool", "hugepagesz", true)
exutil.By("Scale out a new worker node in custom nodepool hugepages-nodepool")
err = oc.AsAdmin().WithoutNamespace().Run("scale").Args("nodepool", "hugepages-nodepool", "-n", hostedClusterNS, "--replicas=2").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check if updating config applied to custom node pool in hosted cluster")
exutil.AssertIfNodePoolUpdatingConfigByName(oc, "hugepages-nodepool", 720, hostedClusterNS)
exutil.By("Check if the custom tuned profile openshift-node-hugepages applied to all nodes of custom nodepool.")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, "hugepages-nodepool", "openshift-node-hugepages")
exutil.By("Assert hugepagesz match in /proc/cmdline on all nodes include the second new worker node in custom node pool")
assertIfMatchKenelBootOnNodePoolLevelInHostedCluster(oc, ntoNamespace, "hugepages-nodepool", "hugepagesz", true)
})
| ||||||
test case
|
openshift/openshift-tests-private
|
3848a373-17cf-42f2-b753-b09029f56e24
|
Longduration-NonPreRelease-HyperShiftMGMT-Author:liqcui-Medium-55360-NTO does not generate MachineConfigs with bootcmdline from manual change to Profile status.bootcmdline. [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/psap/hypernto/hypernto.go
|
g.It("Longduration-NonPreRelease-HyperShiftMGMT-Author:liqcui-Medium-55360-NTO does not generate MachineConfigs with bootcmdline from manual change to Profile status.bootcmdline. [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
supportPlatforms := []string{"aws", "azure", "aks"}
if !implStringArrayContains(supportPlatforms, iaasPlatform) {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
defer func() {
oc.AsAdmin().WithoutNamespace().Run("delete").Args("nodepool", "hugepages-nodepool", "-n", hostedClusterNS, "--ignore-not-found").Execute()
isMatch := exutil.CheckAllNodepoolReadyByHostedClusterName(oc, "hugepages-nodepool", hostedClusterNS, 300)
o.Expect(isMatch).To(o.Equal(true))
}()
exutil.By("Create custom node pool in hosted cluster")
if iaasPlatform == "aws" {
exutil.CreateCustomNodePoolInHypershift(oc, "aws", guestClusterName, "hugepages-nodepool", "1", "m5.xlarge", "InPlace", hostedClusterNS, "")
} else if iaasPlatform == "azure" {
//Apply tuned profile to hosted clusters
exutil.By("Ge the default nodepool in hosted cluster as secondary nodepool")
defaultNodePoolName := getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(defaultNodePoolName).NotTo(o.BeEmpty())
exutil.CreateCustomNodePoolInHypershift(oc, "azure", guestClusterName, "hugepages-nodepool", "1", "Standard_D4s_v4", "InPlace", hostedClusterNS, defaultNodePoolName)
} else if iaasPlatform == "aks" {
//Apply tuned profile to hosted clusters
exutil.By("Ge the default nodepool in hosted cluster as secondary nodepool")
defaultNodePoolName := getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(defaultNodePoolName).NotTo(o.BeEmpty())
exutil.CreateCustomNodePoolInHypershift(oc, "aks", guestClusterName, "hugepages-nodepool", "1", "Standard_D4s_v4", "InPlace", hostedClusterNS, defaultNodePoolName)
}
exutil.By("Check if custom node pool is ready in hosted cluster")
exutil.AssertIfNodePoolIsReadyByName(oc, "hugepages-nodepool", 720, hostedClusterNS)
//Delete configmap in clusters namespace
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hugepages", "-n", hostedClusterNS, "--ignore-not-found").Execute()
//Create configmap, it will create custom tuned profile based on this configmap
exutil.By("Create configmap tuned-hugepages in management cluster")
exutil.ApplyOperatorResourceByYaml(oc, hostedClusterNS, tunedWithKernelBootProfileName)
configmapsInMgmtClusters, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", hostedClusterNS).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(configmapsInMgmtClusters).NotTo(o.BeEmpty())
o.Expect(configmapsInMgmtClusters).To(o.ContainSubstring("hugepages"))
exutil.By("Pick one worker node in custom node pool of hosted cluster")
workerNodeName, err := exutil.GetFirstWorkerNodeByNodePoolNameInHostedCluster(oc, "hugepages-nodepool")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(workerNodeName).NotTo(o.BeEmpty())
e2e.Logf("Worker Node: %v", workerNodeName)
exutil.By("Get operator pod name in hosted cluster controlplane namespaceh")
ntoOperatorPodName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", guestClusterNS, "-lname=cluster-node-tuning-operator", "-ojsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(ntoOperatorPodName).NotTo(o.BeEmpty())
//Delete configmap in hosted cluster namespace and disable tuningConfig
defer assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeName, "openshift-node")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-hugepages-nodepool", "-n", guestClusterNS, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", "hugepages-nodepool", "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
//Enable tuned in hosted clusters
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", "hugepages-nodepool", "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[{\"name\": \"tuned-hugepages\"}]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check if the configmap tuned-hugepages-nodepool created in corresponding hosted ns in management cluster")
configMaps := getTuningConfigMapNameWithRetry(oc, guestClusterNS, "hugepages-nodepool")
o.Expect(configMaps).NotTo(o.BeEmpty())
o.Expect(configMaps).To(o.ContainSubstring("hugepages-nodepool"))
exutil.By("Check if the configmap applied to tuned-hugepages-nodepool in management cluster")
exutil.AssertIfNodePoolUpdatingConfigByName(oc, "hugepages-nodepool", 360, hostedClusterNS)
exutil.By("Check if the tuned hugepages-xxxxxx is created in hosted cluster nodepool")
tunedNameList, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("tuned", "-n", ntoNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNameList).NotTo(o.BeEmpty())
e2e.Logf("The list of tuned tunedNameList is: \n%v", tunedNameList)
o.Expect(tunedNameList).To(o.ContainSubstring("hugepages"))
exutil.By("Get the tuned pod name that running on custom node pool worker node")
tunedPodName, err := exutil.GetPodNameInHostedCluster(oc, ntoNamespace, "", workerNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedPodName).NotTo(o.BeEmpty())
e2e.Logf("Tuned Pod: %v", tunedPodName)
exutil.By("Check if the tuned profile applied to custom node pool worker nodes")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeName, "openshift-node-hugepages")
exutil.By("Assert hugepagesz match in /proc/cmdline on the worker node in custom node pool")
assertIfMatchKenelBootOnNodePoolLevelInHostedCluster(oc, ntoNamespace, "hugepages-nodepool", "hugepagesz", true)
exutil.By("Manually change the hugepage value in the worker node of custom nodepool hugepages-nodepool in hosted clusters")
err = oc.AsAdmin().AsGuestKubeconf().Run("patch").Args("-n", ntoNamespace, "profile/"+workerNodeName, "--type", "merge", "-p", `{"status":{"bootcmdline": "hugepagesz=2M hugepages=10"}}`).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check if the value of profile change in the worker node of custom nodepool hugepages-nodepool in hosted clusters, the expected value is still hugepagesz=2M hugepages=50")
bootCMDLinestdOut, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("-n", ntoNamespace, "profile/"+workerNodeName, "-ojsonpath='{.status.bootcmdline}'").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("status.bootcmdline is: %v", bootCMDLinestdOut)
o.Expect(bootCMDLinestdOut).NotTo(o.ContainSubstring("hugepagesz=2M hugepages=50"))
//The field of bootcmdline has been deprecated
exutil.By("Check if custom node pool is ready in hosted cluster")
exutil.AssertIfNodePoolIsReadyByName(oc, "hugepages-nodepool", 360, hostedClusterNS)
exutil.By("Check if the custom tuned profile openshift-node-hugepages applied to all nodes of custom nodepool.")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, "hugepages-nodepool", "openshift-node-hugepages")
exutil.By("Assert hugepagesz match in /proc/cmdline on all nodes include the second new worker node in custom node pool")
assertIfMatchKenelBootOnNodePoolLevelInHostedCluster(oc, ntoNamespace, "hugepages-nodepool", "hugepagesz=2M hugepages=50", true)
})
| ||||||
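The bootcmdline read-back above is a one-off jsonpath get. A tiny sketch of wrapping it is shown below; the function name is hypothetical and only repackages the query already used in the test, which would be useful if the deprecated status.bootcmdline field had to be inspected in more than one place.

// getProfileBootCmdline returns the (deprecated) status.bootcmdline field of
// a node's tuned Profile in the hosted cluster. Sketch only, not repository code.
func getProfileBootCmdline(oc *exutil.CLI, ntoNamespace, nodeName string) string {
	out, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args(
		"-n", ntoNamespace, "profile/"+nodeName,
		"-ojsonpath={.status.bootcmdline}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	return out
}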
test case
|
openshift/openshift-tests-private
|
0c194e60-f4fc-4a85-970f-e8a791cf5588
|
Longduration-NonPreRelease-HyperShiftMGMT-Author:liqcui-Medium-55359-NTO applies one configmap that is referenced in two nodepools in the same hosted cluster. [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/psap/hypernto/hypernto.go
|
g.It("Longduration-NonPreRelease-HyperShiftMGMT-Author:liqcui-Medium-55359-NTO applies one configmap that is referenced in two nodepools in the same hosted cluster. [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
supportPlatforms := []string{"aws", "azure", "aks"}
if !implStringArrayContains(supportPlatforms, iaasPlatform) {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
firstNodePoolName = "hc-custom-nodepool"
//Delete configmap in clusters namespace
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-vmdratio", "-n", hostedClusterNS, "--ignore-not-found").Execute()
//Create configmap, it will create custom tuned profile based on this configmap
exutil.By("Create configmap hc-nodepool-vmdratio in management cluster")
exutil.ApplyOperatorResourceByYaml(oc, hostedClusterNS, tunedWithNodeLevelProfileNameAKSVMRatio)
configmapsInMgmtClusters, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", hostedClusterNS).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(configmapsInMgmtClusters).NotTo(o.BeEmpty())
o.Expect(configmapsInMgmtClusters).To(o.ContainSubstring("hc-nodepool-vmdratio"))
defer func() {
oc.AsAdmin().WithoutNamespace().Run("delete").Args("nodepool", firstNodePoolName, "-n", hostedClusterNS, "--ignore-not-found").Execute()
isMatch := exutil.CheckAllNodepoolReadyByHostedClusterName(oc, firstNodePoolName, hostedClusterNS, 300)
o.Expect(isMatch).To(o.Equal(true))
}()
exutil.By("Create custom node pool in hosted cluster")
if iaasPlatform == "aws" {
exutil.CreateCustomNodePoolInHypershift(oc, "aws", guestClusterName, firstNodePoolName, "1", "m5.xlarge", "InPlace", hostedClusterNS, "")
} else if iaasPlatform == "azure" {
//Apply tuned profile to hosted clusters
exutil.By("Ge the default nodepool in hosted cluster as secondary nodepool")
defaultNodePoolName := getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(defaultNodePoolName).NotTo(o.BeEmpty())
exutil.CreateCustomNodePoolInHypershift(oc, "azure", guestClusterName, firstNodePoolName, "1", "Standard_D4s_v4", "InPlace", hostedClusterNS, defaultNodePoolName)
} else if iaasPlatform == "aks" {
//Apply tuned profile to hosted clusters
exutil.By("Ge the default nodepool in hosted cluster as secondary nodepool")
defaultNodePoolName := getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(defaultNodePoolName).NotTo(o.BeEmpty())
exutil.CreateCustomNodePoolInHypershift(oc, "aks", guestClusterName, firstNodePoolName, "1", "Standard_D4s_v4", "InPlace", hostedClusterNS, defaultNodePoolName)
}
exutil.By("Check if custom node pool is ready in hosted cluster")
exutil.AssertIfNodePoolIsReadyByName(oc, firstNodePoolName, 720, hostedClusterNS)
//Apply tuned profile to hosted clusters
exutil.By("Ge the default nodepool in hosted cluster as secondary nodepool")
secondNodePoolName = getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(secondNodePoolName).NotTo(o.BeEmpty())
exutil.By("Pick one worker node in first custom node pool of hosted cluster")
workerNodeNameInFirstNodepool, err := exutil.GetFirstWorkerNodeByNodePoolNameInHostedCluster(oc, firstNodePoolName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(workerNodeNameInFirstNodepool).NotTo(o.BeEmpty())
e2e.Logf("Worker node in first nodepool: %v", workerNodeNameInFirstNodepool)
exutil.By("Pick one worker node in second node pool of hosted cluster")
workerNodeNameInSecondtNodepool, err := exutil.GetFirstWorkerNodeByNodePoolNameInHostedCluster(oc, secondNodePoolName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(workerNodeNameInSecondtNodepool).NotTo(o.BeEmpty())
e2e.Logf("Worker node in second nodepool: %v", workerNodeNameInSecondtNodepool)
//Delete configmap in hosted cluster namespace and disable tuningConfig
defer assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeNameInFirstNodepool, "openshift-node")
defer assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeNameInSecondtNodepool, "openshift-node")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-hc-nodepool-vmdratio", "-n", guestClusterNS, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+secondNodePoolName, "-n", guestClusterNS, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", firstNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", secondNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
//Apply the tuned profile in first nodepool {firstNodePoolName}
exutil.By("Apply the tuned profile in first nodepool {firstNodePoolName} in management cluster")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", firstNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[{\"name\": \"hc-nodepool-vmdratio\"}]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check if the configmap tuned-{firstNodePoolName} created in corresponding hosted ns in management cluster")
configMaps := getTuningConfigMapNameWithRetry(oc, guestClusterNS, "tuned-"+firstNodePoolName)
o.Expect(configMaps).NotTo(o.BeEmpty())
o.Expect(configMaps).To(o.ContainSubstring("tuned-" + firstNodePoolName))
exutil.By("Check if the tuned hc-nodepool-vmdratio-xxxxxx is created in hosted cluster nodepool")
tunedNameList, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("tuned", "-n", ntoNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNameList).NotTo(o.BeEmpty())
e2e.Logf("The list of tuned tunedNameList is: \n%v", tunedNameList)
o.Expect(tunedNameList).To(o.ContainSubstring("hc-nodepool-vmdratio"))
exutil.By("Get the tuned pod name that running on first custom nodepool worker node")
tunedPodNameInFirstNodePool, err := exutil.GetPodNameInHostedCluster(oc, ntoNamespace, "", workerNodeNameInFirstNodepool)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedPodNameInFirstNodePool).NotTo(o.BeEmpty())
e2e.Logf("Tuned Pod: %v", tunedPodNameInFirstNodePool)
exutil.By("Check if the tuned profile applied to first custom nodepool worker nodes")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeNameInFirstNodepool, "hc-nodepool-vmdratio")
exutil.By("Check if the tuned profile applied to all worker node in the first nodepool.")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "hc-nodepool-vmdratio")
exutil.By("Assert recommended profile (hc-nodepool-vmdratio) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInFirstNodePool, "12", 300, `recommended profile \(hc-nodepool-vmdratio\) matches current configuration|static tuning from profile 'hc-nodepool-vmdratio' applied`)
exutil.By("Check if the setting of sysctl vm.dirty_ratio applied to worker nodes in the first custom nodepool, expected value is 56")
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, firstNodePoolName, "sysctl", "vm.dirty_ratio", "56")
//Compare the sysctl vm.dirty_ratio not equal to 56
exutil.By("Check if the setting of sysctl vm.dirty_ratio shouldn't applied to worker nodes in the second nodepool, expected value is default value, not equal 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "vm.dirty_ratio", "56")
//Apply the tuned profile in second nodepool
exutil.By("Apply the tuned profile in second nodepool in management cluster")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", secondNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[{\"name\": \"hc-nodepool-vmdratio\"}]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Get the tuned pod name that running on second nodepool worker node")
tunedPodNameInSecondNodePool, err := exutil.GetPodNameInHostedCluster(oc, ntoNamespace, "", workerNodeNameInSecondtNodepool)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedPodNameInSecondNodePool).NotTo(o.BeEmpty())
e2e.Logf("Tuned Pod: %v", tunedPodNameInSecondNodePool)
exutil.By("Check if the tuned profile applied to second nodepool worker nodes")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeNameInSecondtNodepool, "hc-nodepool-vmdratio")
exutil.By("Check if the tuned profile applied to all worker node in the first nodepool.")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "hc-nodepool-vmdratio")
exutil.By("Assert recommended profile (hc-nodepool-vmdratio) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInSecondNodePool, "12", 300, `recommended profile \(hc-nodepool-vmdratio\) matches current configuration|static tuning from profile 'hc-nodepool-vmdratio' applied`)
exutil.By("Check if the setting of sysctl vm.dirty_ratio applied to worker nodes in the second nodepool, expected value is 56")
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Remove the custom tuned profile from the first nodepool in hosted cluster ...")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", firstNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
//Compare the sysctl vm.dirty_ratio not equal to 56
exutil.By("Check if the setting of sysctl vm.dirty_ratio shouldn't applied to worker nodes in the first nodepool, expected value is default value, not equal 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Check if the setting of sysctl vm.dirty_ratio applied to worker nodes in the second nodepool, no impact with removing vm.dirty_ratio setting in first nodepool")
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Remove the custom tuned profile from the second nodepool in hosted cluster ...")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", secondNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
//Compare the sysctl vm.dirty_ratio not equal to 56
exutil.By("Check if the setting of sysctl vm.dirty_ratio shouldn't applied to worker nodes in the first nodepool, expected value is default value, not equal 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "sysctl", "vm.dirty_ratio", "56")
//Compare the sysctl vm.dirty_ratio not equal to 56
exutil.By("Check if the setting of sysctl vm.dirty_ratio shouldn't applied to worker nodes in the second nodepool, expected value is default value, not equal 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "vm.dirty_ratio", "56")
//Clean up all remaining resources/settings
exutil.By("Remove configmap from management cluster")
oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-vmdratio", "-n", hostedClusterNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+firstNodePoolName, "-n", guestClusterNS, "--ignore-not-found").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+secondNodePoolName, "-n", guestClusterNS, "--ignore-not-found").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Assert recommended profile (openshift-node) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInFirstNodePool, "12", 300, `recommended profile \(openshift-node\) matches current configuration|static tuning from profile 'openshift-node' applied`)
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInSecondNodePool, "12", 300, `recommended profile \(openshift-node\) matches current configuration|static tuning from profile 'openshift-node' applied`)
exutil.By("Check if the custom tuned profile removed from worker nodes of nodepool, default openshift-node applied to worker node")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "openshift-node")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "openshift-node")
})
| ||||||
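The test above attaches and detaches tuningConfig references on a nodepool with the same merge-patch call in several places. Below is a minimal sketch of that pattern, assuming the suite's existing exutil/oc and Gomega imports plus fmt and strings; the helper name setNodePoolTuningConfig is illustrative and not part of the repository.
// Sketch only: wraps the nodepool tuningConfig merge patch used in the tests above and below.
// An empty (or nil) slice clears spec.tuningConfig, which is how the tests detach a profile.
func setNodePoolTuningConfig(oc *exutil.CLI, nodePoolName, hostedClusterNS string, configMapNames []string) {
	entries := make([]string, 0, len(configMapNames))
	for _, name := range configMapNames {
		// Each entry references a tuning ConfigMap in the hosted cluster namespace, e.g. {"name": "hc-nodepool-vmdratio"}
		entries = append(entries, fmt.Sprintf(`{"name": %q}`, name))
	}
	patch := fmt.Sprintf(`{"spec":{"tuningConfig":[%s]}}`, strings.Join(entries, ","))
	err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", nodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", patch).Execute()
	o.Expect(err).NotTo(o.HaveOccurred())
}
For example, setNodePoolTuningConfig(oc, firstNodePoolName, hostedClusterNS, []string{"hc-nodepool-vmdratio"}) attaches the profile and setNodePoolTuningConfig(oc, firstNodePoolName, hostedClusterNS, nil) detaches it.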
test case
|
openshift/openshift-tests-private
|
dc019b98-76f1-480c-b030-73cd5e51f9c3
|
Longduration-NonPreRelease-HyperShiftMGMT-Author:liqcui-Medium-53885-NTO applies different configmaps that reference to into two node pool in the same hosted clusters. [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/psap/hypernto/hypernto.go
|
g.It("Longduration-NonPreRelease-HyperShiftMGMT-Author:liqcui-Medium-53885-NTO applies different configmaps that reference to into two node pool in the same hosted clusters. [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
supportPlatforms := []string{"aws", "azure", "aks"}
if !implStringArrayContains(supportPlatforms, iaasPlatform) {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
firstNodePoolName = "hc-custom-nodepool"
//Delete configmap in clusters namespace
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-vmdratio", "-n", hostedClusterNS, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-pidmax", "-n", hostedClusterNS, "--ignore-not-found").Execute()
//Create configmap, it will create custom tuned profile based on this configmap
exutil.By("Create configmap hc-nodepool-vmdratio and hc-nodepool-pidmax in management cluster")
exutil.ApplyOperatorResourceByYaml(oc, hostedClusterNS, tunedWithNodeLevelProfileNameAKSVMRatio)
exutil.ApplyOperatorResourceByYaml(oc, hostedClusterNS, tunedWithNodeLevelProfileNameAKSPidmax)
configmapsInMgmtClusters, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", hostedClusterNS).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(configmapsInMgmtClusters).NotTo(o.BeEmpty())
o.Expect(configmapsInMgmtClusters).To(o.ContainSubstring("hc-nodepool-vmdratio"))
o.Expect(configmapsInMgmtClusters).To(o.ContainSubstring("hc-nodepool-pidmax"))
defer func() {
oc.AsAdmin().WithoutNamespace().Run("delete").Args("nodepool", firstNodePoolName, "-n", hostedClusterNS, "--ignore-not-found").Execute()
isMatch := exutil.CheckAllNodepoolReadyByHostedClusterName(oc, firstNodePoolName, hostedClusterNS, 300)
o.Expect(isMatch).To(o.Equal(true))
}()
exutil.By("Create custom node pool in hosted cluster")
if iaasPlatform == "aws" {
exutil.CreateCustomNodePoolInHypershift(oc, "aws", guestClusterName, firstNodePoolName, "1", "m5.xlarge", "InPlace", hostedClusterNS, "")
} else if iaasPlatform == "azure" {
//Apply tuned profile to hosted clusters
exutil.By("Ge the default nodepool in hosted cluster as secondary nodepool")
defaultNodePoolName := getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(defaultNodePoolName).NotTo(o.BeEmpty())
exutil.CreateCustomNodePoolInHypershift(oc, "azure", guestClusterName, firstNodePoolName, "1", "Standard_D4s_v4", "InPlace", hostedClusterNS, defaultNodePoolName)
} else if iaasPlatform == "aks" {
//Apply tuned profile to hosted clusters
exutil.By("Ge the default nodepool in hosted cluster as secondary nodepool")
defaultNodePoolName := getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(defaultNodePoolName).NotTo(o.BeEmpty())
exutil.CreateCustomNodePoolInHypershift(oc, "aks", guestClusterName, firstNodePoolName, "1", "Standard_D4s_v4", "InPlace", hostedClusterNS, defaultNodePoolName)
}
exutil.By("Check if custom node pool is ready in hosted cluster")
exutil.AssertIfNodePoolIsReadyByName(oc, firstNodePoolName, 720, hostedClusterNS)
//Apply tuned profile to hosted clusters
exutil.By("Ge the default nodepool in hosted cluster as secondary nodepool")
secondNodePoolName = getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(secondNodePoolName).NotTo(o.BeEmpty())
exutil.By("Pick one worker node in first custom node pool of hosted cluster")
workerNodeNameInFirstNodepool, err := exutil.GetFirstWorkerNodeByNodePoolNameInHostedCluster(oc, firstNodePoolName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(workerNodeNameInFirstNodepool).NotTo(o.BeEmpty())
e2e.Logf("Worker node in first nodepool: %v", workerNodeNameInFirstNodepool)
exutil.By("Pick one worker node in second node pool of hosted cluster")
workerNodeNameInSecondtNodepool, err := exutil.GetFirstWorkerNodeByNodePoolNameInHostedCluster(oc, secondNodePoolName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(workerNodeNameInSecondtNodepool).NotTo(o.BeEmpty())
e2e.Logf("Worker node in second nodepool: %v", workerNodeNameInSecondtNodepool)
//Delete configmap in hosted cluster namespace and disable tuningConfig
defer assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeNameInFirstNodepool, "openshift-node")
defer assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeNameInSecondtNodepool, "openshift-node")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-hc-nodepool-vmdratio", "-n", guestClusterNS, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-hc-nodepool-pidmax", "-n", guestClusterNS, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", firstNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", secondNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
//Apply the tuned profile in first nodepool {firstNodePoolName}
exutil.By("Apply the tuned profile in first nodepool {firstNodePoolName} in management cluster")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", firstNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[{\"name\": \"hc-nodepool-vmdratio\"}]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
//Apply the tuned profile in second nodepool {secondNodePoolName}
exutil.By("Apply the tuned profile in second nodepool {secondNodePoolName} in management cluster")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", secondNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[{\"name\": \"hc-nodepool-pidmax\"}]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check if the configmap tuned-{firstNodePoolName} created in corresponding hosted ns in management cluster")
configMaps := getTuningConfigMapNameWithRetry(oc, guestClusterNS, "tuned-"+firstNodePoolName)
o.Expect(configMaps).NotTo(o.BeEmpty())
o.Expect(configMaps).To(o.ContainSubstring("tuned-" + firstNodePoolName))
exutil.By("Check if the configmap tuned-{secondNodePoolName} created in corresponding hosted ns in management cluster")
configMaps = getTuningConfigMapNameWithRetry(oc, guestClusterNS, "tuned-"+secondNodePoolName)
o.Expect(configMaps).NotTo(o.BeEmpty())
o.Expect(configMaps).To(o.ContainSubstring("tuned-" + secondNodePoolName))
exutil.By("Check if the tuned hc-nodepool-vmdratio-xxxxxx and hc-nodepool-pidmax-xxxxxx is created in hosted cluster nodepool")
tunedNameList, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("tuned", "-n", ntoNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNameList).NotTo(o.BeEmpty())
e2e.Logf("The list of tuned tunedNameList is: \n%v", tunedNameList)
o.Expect(tunedNameList).To(o.ContainSubstring("hc-nodepool-vmdratio"))
o.Expect(tunedNameList).To(o.ContainSubstring("hc-nodepool-pidmax"))
exutil.By("Get the tuned pod name that running on first custom nodepool worker node")
tunedPodNameInFirstNodePool, err := exutil.GetPodNameInHostedCluster(oc, ntoNamespace, "", workerNodeNameInFirstNodepool)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedPodNameInFirstNodePool).NotTo(o.BeEmpty())
e2e.Logf("Tuned Pod: %v", tunedPodNameInFirstNodePool)
exutil.By("Get the tuned pod name that running on second nodepool worker node")
tunedPodNameInSecondNodePool, err := exutil.GetPodNameInHostedCluster(oc, ntoNamespace, "", workerNodeNameInSecondtNodepool)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedPodNameInSecondNodePool).NotTo(o.BeEmpty())
e2e.Logf("Tuned Pod: %v", tunedPodNameInSecondNodePool)
exutil.By("Check if the tuned profile applied to first custom nodepool worker nodes")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeNameInFirstNodepool, "hc-nodepool-vmdratio")
exutil.By("Check if the tuned profile applied to second nodepool worker nodes")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeNameInSecondtNodepool, "hc-nodepool-pidmax")
exutil.By("Check if the tuned profile applied to all worker node in the first nodepool.")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "hc-nodepool-vmdratio")
exutil.By("Check if the tuned profile applied to all worker node in the second nodepool.")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "hc-nodepool-pidmax")
exutil.By("Assert recommended profile (hc-nodepool-vmdratio) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInFirstNodePool, "12", 300, `recommended profile \(hc-nodepool-vmdratio\) matches current configuration|static tuning from profile 'hc-nodepool-vmdratio' applied`)
exutil.By("Assert recommended profile (hc-nodepool-pidmax) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInSecondNodePool, "12", 300, `recommended profile \(hc-nodepool-pidmax\) matches current configuration|static tuning from profile 'hc-nodepool-pidmax' applied`)
exutil.By("Check if the setting of sysctl vm.dirty_ratio applied to worker nodes in the first custom nodepool, expected value is 56")
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, firstNodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Check if the setting of sysctl kernel.pid_max applied to worker nodes in the second custom nodepool, expected value is 868686")
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "kernel.pid_max", "868686")
//Compare the sysctl kernel.pid_max not equal to 868686 in first nodepool
exutil.By("Check if the setting of sysctl kernel.pid_max shouldn't applied to worker nodes in the first nodepool, expected value is default value, not equal 868686")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "sysctl", "kernel.pid_max", "868686")
//Compare the sysctl vm.dirty_ratio not equal to 56 in second nodepool
exutil.By("Check if the setting of sysctl vm.dirty_ratio shouldn't applied to worker nodes in the second nodepool, expected value is default value, not equal 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Remove the custom tuned profile from the first nodepool in hosted cluster ...")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", firstNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check if the tuned profile still applied to all worker node in the second nodepool.")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "hc-nodepool-pidmax")
//Compare the sysctl vm.dirty_ratio not equal to 56 in first nodepool
exutil.By("Check if the setting of sysctl vm.dirty_ratio shouldn't applied to worker nodes in the first nodepool, expected value is default value, not equal 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Check if the setting of sysctl kernel.pid_max still applied to worker nodes in the second nodepool, no impact with removing vm.dirty_ratio setting in first nodepool")
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "kernel.pid_max", "868686")
exutil.By("Remove the custom tuned profile from the second nodepool in hosted cluster ...")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", secondNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
//Compare the sysctl vm.dirty_ratio not equal to 56 in first nodepool
exutil.By("Check if the setting of sysctl vm.dirty_ratio shouldn't applied to worker nodes in the first nodepool, expected value is default value, not equal 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "sysctl", "vm.dirty_ratio", "56")
//Compare the sysctl kernel.pid_max not equal to 868686 in second nodepool
exutil.By("Check if the setting of sysctl kernel.pid_max shouldn't applied to worker nodes in the second nodepool, expected value is default value, not equal 868686")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "kernel.pid_max", "868686")
//Clean up all remaining resources/settings
exutil.By("Remove configmap from management cluster")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-vmdratio", "-n", hostedClusterNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-pidmax", "-n", hostedClusterNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+firstNodePoolName, "-n", guestClusterNS, "--ignore-not-found").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+secondNodePoolName, "-n", guestClusterNS, "--ignore-not-found").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Assert recommended profile (openshift-node) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInFirstNodePool, "12", 300, `recommended profile \(openshift-node\) matches current configuration|static tuning from profile 'openshift-node' applied`)
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInSecondNodePool, "12", 300, `recommended profile \(openshift-node\) matches current configuration|static tuning from profile 'openshift-node' applied`)
exutil.By("Check if the custom tuned profile removed from worker nodes of nodepool, default openshift-node applied to worker node")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "openshift-node")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "openshift-node")
})
| ||||||
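Each case above verifies the same three-stage chain after a nodepool is patched. Below is a minimal sketch of that chain, assuming the suite-local helpers already used by these tests (getTuningConfigMapNameWithRetry, assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster, compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster); the wrapper name verifyNodePoolTuning is illustrative.
// Sketch only: the per-nodepool verification chain the tests above repeat.
func verifyNodePoolTuning(oc *exutil.CLI, guestClusterNS, ntoNamespace, nodePoolName, profileName, sysctlKey, sysctlValue string) {
	// 1. NTO mirrors the referenced ConfigMap as tuned-<nodepool> into the hosted control-plane namespace
	configMaps := getTuningConfigMapNameWithRetry(oc, guestClusterNS, "tuned-"+nodePoolName)
	o.Expect(configMaps).To(o.ContainSubstring("tuned-" + nodePoolName))
	// 2. The custom profile becomes the applied profile on every worker node of the nodepool
	assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, nodePoolName, profileName)
	// 3. The sysctl value carried by the profile is live on those worker nodes
	compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, nodePoolName, "sysctl", sysctlKey, sysctlValue)
}
For example, verifyNodePoolTuning(oc, guestClusterNS, ntoNamespace, firstNodePoolName, "hc-nodepool-vmdratio", "vm.dirty_ratio", "56") covers the three checks that case 53885 performs against its custom nodepool.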
test case
|
openshift/openshift-tests-private
|
7a8c22b3-ddc0-4764-9b4d-f75ef38237f9
|
HyperShiftMGMT-Author:liqcui-Medium-54546-NTO applies two Tuneds from two configmap referenced in one nodepool of a hosted cluster on hypershift.[Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/psap/hypernto/hypernto.go
|
g.It("HyperShiftMGMT-Author:liqcui-Medium-54546-NTO applies two Tuneds from two configmap referenced in one nodepool of a hosted cluster on hypershift.[Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
supportPlatforms := []string{"aws", "azure", "aks"}
if !implStringArrayContains(supportPlatforms, iaasPlatform) {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
//Delete configmap in clusters namespace
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-vmdratio", "-n", hostedClusterNS, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-pidmax", "-n", hostedClusterNS, "--ignore-not-found").Execute()
//Create configmap, it will create custom tuned profile based on this configmap
exutil.By("Create configmap hc-nodepool-vmdratio and hc-nodepool-pidmax in management cluster")
exutil.ApplyOperatorResourceByYaml(oc, hostedClusterNS, tunedWithNodeLevelProfileNameAKSVMRatio)
exutil.ApplyOperatorResourceByYaml(oc, hostedClusterNS, tunedWithNodeLevelProfileNameAKSPidmax)
configmapsInMgmtClusters, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", hostedClusterNS).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(configmapsInMgmtClusters).NotTo(o.BeEmpty())
o.Expect(configmapsInMgmtClusters).To(o.ContainSubstring("hc-nodepool-vmdratio"))
o.Expect(configmapsInMgmtClusters).To(o.ContainSubstring("hc-nodepool-pidmax"))
//Apply tuned profile to hosted clusters
exutil.By("Get the default nodepool name in hosted cluster")
nodePoolName := getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(nodePoolName).NotTo(o.BeEmpty())
exutil.By("Pick one worker node in first custom node pool of hosted cluster")
workerNodeNameInNodepool, err := exutil.GetFirstWorkerNodeByNodePoolNameInHostedCluster(oc, nodePoolName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(workerNodeNameInNodepool).NotTo(o.BeEmpty())
e2e.Logf("Worker node in first nodepool: %v", workerNodeNameInNodepool)
//Delete configmap in hosted cluster namespace and disable tuningConfig
defer assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeNameInNodepool, "openshift-node")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-hc-nodepool-vmdratio", "-n", guestClusterNS, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-hc-nodepool-pidmax", "-n", guestClusterNS, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", nodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
//Apply the tuned profile in nodepool in hostedcluster
exutil.By("Apply the tuned profile in default nodepool in management cluster")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", nodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[{\"name\": \"hc-nodepool-vmdratio\"},{\"name\": \"hc-nodepool-pidmax\"}]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check if the configmap tuned-{nodePoolName} created in corresponding hosted ns in management cluster")
configMaps := getTuningConfigMapNameWithRetry(oc, guestClusterNS, "tuned-"+nodePoolName)
o.Expect(configMaps).NotTo(o.BeEmpty())
o.Expect(configMaps).To(o.ContainSubstring("tuned-" + nodePoolName))
exutil.By("Check if the tuned hc-nodepool-vmdratio-xxxxxx and hc-nodepool-pidmax-xxxxxx is created in hosted cluster nodepool")
tunedNameList, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("tuned", "-n", ntoNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNameList).NotTo(o.BeEmpty())
e2e.Logf("The list of tuned tunedNameList is: \n%v", tunedNameList)
o.Expect(tunedNameList).To(o.ContainSubstring("hc-nodepool-vmdratio"))
o.Expect(tunedNameList).To(o.ContainSubstring("hc-nodepool-pidmax"))
exutil.By("Get the tuned pod name that running on default nodepool worker node")
tunedPodNameInNodePool, err := exutil.GetPodNameInHostedCluster(oc, ntoNamespace, "", workerNodeNameInNodepool)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedPodNameInNodePool).NotTo(o.BeEmpty())
e2e.Logf("Tuned Pod: %v", tunedPodNameInNodePool)
exutil.By("Check if the tuned profile applied to nodepool worker nodes, the second profile hc-nodepool-pidmax take effective by default, the first one won't take effective")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeNameInNodepool, "hc-nodepool-pidmax")
exutil.By("Check if the tuned profile applied to all worker node in the second nodepool.")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, nodePoolName, "hc-nodepool-pidmax")
exutil.By("Assert recommended profile (hc-nodepool-pidmax) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInNodePool, "12", 300, `recommended profile \(hc-nodepool-pidmax\) matches current configuration|static tuning from profile 'hc-nodepool-pidmax' applied`)
exutil.By("Check if the setting of sysctl kernel.pid_max applied to worker nodes in the default nodepool, expected value is 868686")
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, nodePoolName, "sysctl", "kernel.pid_max", "868686")
//Compare the sysctl vm.dirty_ratio not equal to 56 in default nodepool
exutil.By("Check if the setting of sysctl vm.dirty_ratio shouldn't applied to worker nodes in the nodepool, expected value is default value, not equal 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, nodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Chnagge the hc-nodepool-vmdratio with a higher priority in management cluster, the lower number of priority with higher priority")
exutil.ApplyOperatorResourceByYaml(oc, hostedClusterNS, tunedWithNodeLevelProfileNameAKSVMRatio18)
exutil.By("Check if the tuned profile hc-nodepool-vmdratio applied to all worker node in the nodepool.")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, nodePoolName, "hc-nodepool-vmdratio")
exutil.By("Assert recommended profile (hc-nodepool-vmdratio) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInNodePool, "12", 300, `recommended profile \(hc-nodepool-vmdratio\) matches current configuration|static tuning from profile 'hc-nodepool-vmdratio' applied`)
exutil.By("Check if the setting of sysctl vm.dirty_ratio applied to worker nodes in the nodepool, expected value is 56")
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, nodePoolName, "sysctl", "vm.dirty_ratio", "56")
//Compare the sysctl kernel.pid_max not equal to 868686 in first nodepool
exutil.By("Check if the setting of sysctl kernel.pid_max shouldn't applied to worker nodes in the nodepool, expected value is default value, not equal 868686")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, nodePoolName, "sysctl", "kernel.pid_max", "868686")
exutil.By("Chnagge custom profile include setting with <openshift-node,hc-nodepool-vmdratio> and set priority to 16 in management cluster, both custom profile take effective")
exutil.ApplyOperatorResourceByYaml(oc, hostedClusterNS, tunedWithNodeLevelProfileNameAKSPidmax16)
exutil.By("Check if the setting of sysctl vm.dirty_ratio applied to worker nodes in the nodepool, expected value is 56")
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, nodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Check if the setting of sysctl kernel.pid_max applied to worker nodes in the default nodepool, expected value is 868686")
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, nodePoolName, "sysctl", "kernel.pid_max", "868686")
exutil.By("Chnagge the value of kernel.pid_max of custom profile hc-nodepool-pidmax in management cluster")
exutil.ApplyOperatorResourceByYaml(oc, hostedClusterNS, tunedWithNodeLevelProfileNameAKSPidmax1688)
exutil.By("Check if the setting of sysctl kernel.pid_max applied to worker nodes in the default nodepool, expected value is 888888")
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, nodePoolName, "sysctl", "kernel.pid_max", "888888")
exutil.By("Remove the custom tuned profile from the first nodepool in hosted cluster ...")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", nodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
//Compare the sysctl vm.dirty_ratio not equal to 56 in first nodepool
exutil.By("Check if the setting of sysctl vm.dirty_ratio shouldn't applied to worker nodes in the nodepool, expected value is default value, not equal 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, nodePoolName, "sysctl", "vm.dirty_ratio", "56")
//Compare the sysctl kernel.pid_max not equal to 868686 in second nodepool
exutil.By("Check if the setting of sysctl kernel.pid_max shouldn't applied to worker nodes in the nodepool, expected value is default value, not equal 868686")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, nodePoolName, "sysctl", "kernel.pid_max", "868686")
//Clean up all remaining resources/settings
exutil.By("Remove configmap from management cluster")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-vmdratio", "-n", hostedClusterNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-pidmax", "-n", hostedClusterNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+nodePoolName, "-n", guestClusterNS, "--ignore-not-found").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Assert recommended profile (openshift-node) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInNodePool, "12", 300, `recommended profile \(openshift-node\) matches current configuration|static tuning from profile 'openshift-node' applied`)
exutil.By("Check if the custom tuned profile removed from worker nodes of nodepool, default openshift-node applied to worker node")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, nodePoolName, "openshift-node")
})
| ||||||
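Case 54546 above hinges on profile priority: when two Tuneds target the same nodepool, the profile with the lower numeric priority is recommended, and an include line can chain profiles so both settings take effect. Its fixtures are prebuilt YAML files, but the same knobs are exposed by the template used by the following cases (tunedWithNodeLevelProfileName); the call below is a sketch with illustrative parameter values, meant to run inside a test body.
// Sketch only: regenerate the hc-nodepool-vmdratio tuning ConfigMap with priority 18 so that it
// outranks a profile created at the default priority 20 on the same nodepool.
exutil.ApplyNsResourceFromTemplate(oc, hostedClusterNS, "--ignore-unknown-parameters=true",
	"-f", tunedWithNodeLevelProfileName,
	"-p", "TUNEDPROFILENAME=hc-nodepool-vmdratio", "SYSCTLPARM=vm.dirty_ratio", "SYSCTLVALUE=56",
	"PRIORITY=18", "INCLUDE=openshift-node")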
test case
|
openshift/openshift-tests-private
|
ec685f05-a6b4-4f26-ae10-7372215c1251
|
NonPreRelease-Longduration-HyperShiftMGMT-Author:liqcui-Medium-53880-NTO apply one configmap that reference to two separated hosted clusters on hypershift. [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/psap/hypernto/hypernto.go
|
g.It("NonPreRelease-Longduration-HyperShiftMGMT-Author:liqcui-Medium-53880-NTO apply one configmap that reference to two separated hosted clusters on hypershift. [Disruptive]", func() {
//Second Hosted Cluster
guestClusterName2, guestClusterKube2, hostedClusterNS2 = exutil.ValidHypershiftAndGetGuestKubeConf4SecondHostedCluster(oc)
e2e.Logf("%s, %s, %s", guestClusterName2, guestClusterKube2, hostedClusterNS2)
guestClusterNS2 = hostedClusterNS2 + "-" + guestClusterName2
e2e.Logf("HostedClusterControlPlaneNS: %v", guestClusterNS2)
// ensure NTO operator is installed
isNTO2 = isHyperNTOPodInstalled(oc, guestClusterNS2)
// test requires NTO to be installed
if !isNTO || !isNTO2 {
g.Skip("NTO is not installed - skipping test ...")
}
supportPlatforms := []string{"aws", "azure"}
if !implStringArrayContains(supportPlatforms, iaasPlatform) {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
//Delete configmap in clusters namespace
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-vmdratio", "-n", hostedClusterNS, "--ignore-not-found").Execute()
//Create configmap, it will create custom tuned profile based on this configmap
exutil.By("Create configmap hc-nodepool-vmdratio in management cluster")
exutil.ApplyNsResourceFromTemplate(oc, hostedClusterNS, "--ignore-unknown-parameters=true", "-f", tunedWithNodeLevelProfileName, "-p", "TUNEDPROFILENAME=hc-nodepool-vmdratio", "SYSCTLPARM=vm.dirty_ratio", "SYSCTLVALUE=56", "PRIORITY=20", "INCLUDE=openshift-node")
configmapsInMgmtClusters, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", hostedClusterNS).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(configmapsInMgmtClusters).NotTo(o.BeEmpty())
o.Expect(configmapsInMgmtClusters).To(o.ContainSubstring("hc-nodepool-vmdratio"))
exutil.By("Ge the default nodepool in hosted cluster as first nodepool")
firstNodePoolName = getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(firstNodePoolName).NotTo(o.BeEmpty())
exutil.By("Ge the default nodepool in hosted cluster as second nodepool")
secondNodePoolName = getNodePoolNamebyHostedClusterName(oc, guestClusterName2, hostedClusterNS2)
o.Expect(secondNodePoolName).NotTo(o.BeEmpty())
exutil.By("Pick one worker node in default node pool of first hosted cluster")
oc.SetGuestKubeconf(guestClusterKube)
workerNodeNameInFirstNodepool, err := exutil.GetFirstWorkerNodeByNodePoolNameInHostedCluster(oc, firstNodePoolName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(workerNodeNameInFirstNodepool).NotTo(o.BeEmpty())
e2e.Logf("Worker node in nodepool in first hosted cluster: %v", workerNodeNameInFirstNodepool)
defer assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeNameInFirstNodepool, "openshift-node")
defer oc.SetGuestKubeconf(guestClusterKube)
oc.SetGuestKubeconf(guestClusterKube2)
exutil.By("Pick one worker node in default node pool of second hosted cluster")
workerNodeNameInSecondNodepool, err := exutil.GetFirstWorkerNodeByNodePoolNameInHostedCluster(oc, secondNodePoolName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(workerNodeNameInSecondNodepool).NotTo(o.BeEmpty())
e2e.Logf("Worker node in nodepool in second hosted cluster: %v", workerNodeNameInSecondNodepool)
defer assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc.SetGuestKubeconf(guestClusterKube2), ntoNamespace, workerNodeNameInSecondNodepool, "openshift-node")
defer oc.SetGuestKubeconf(guestClusterKube2)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+firstNodePoolName, "-n", guestClusterNS, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+secondNodePoolName, "-n", guestClusterNS2, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", firstNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", secondNodePoolName, "-n", hostedClusterNS2, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
//Apply the tuned profile in first nodepool {firstNodePoolName}
exutil.By("Apply the tuned profile in default nodepool {firstNodePoolName} in management cluster")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", firstNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[{\"name\": \"hc-nodepool-vmdratio\"}]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetGuestKubeconf(guestClusterKube)
exutil.By("Check if the configmap tuned-{firstNodePoolName} created in corresponding hosted ns in management cluster")
configMaps := getTuningConfigMapNameWithRetry(oc, guestClusterNS, "tuned-"+firstNodePoolName)
o.Expect(configMaps).NotTo(o.BeEmpty())
o.Expect(configMaps).To(o.ContainSubstring("tuned-" + firstNodePoolName))
exutil.By("Check if the tuned hc-nodepool-vmdratio-xxxxxx is created in hosted cluster nodepool")
AssertIfTunedIsReadyByNameInHostedCluster(oc, "hc-nodepool-vmdratio", ntoNamespace)
exutil.By("Get the tuned pod name that running on first custom nodepool worker node")
tunedPodNameInFirstNodePool, err := exutil.GetPodNameInHostedCluster(oc, ntoNamespace, "", workerNodeNameInFirstNodepool)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedPodNameInFirstNodePool).NotTo(o.BeEmpty())
e2e.Logf("Tuned Pod: %v", tunedPodNameInFirstNodePool)
exutil.By("Check if the tuned profile applied to first custom nodepool worker nodes")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeNameInFirstNodepool, "hc-nodepool-vmdratio")
exutil.By("Check if the tuned profile applied to all worker node in the first nodepool.")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "hc-nodepool-vmdratio")
exutil.By("Assert recommended profile (hc-nodepool-vmdratio) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInFirstNodePool, "12", 300, `recommended profile \(hc-nodepool-vmdratio\) matches current configuration|static tuning from profile 'hc-nodepool-vmdratio' applied`)
exutil.By("Check if the setting of sysctl vm.dirty_ratio applied to worker nodes in first hosted cluster, expected value is 56")
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, firstNodePoolName, "sysctl", "vm.dirty_ratio", "56")
//Compare the sysctl vm.dirty_ratio not equal to 56
exutil.By("Set second kubeconfig to access second hosted cluster")
oc.SetGuestKubeconf(guestClusterKube2)
exutil.By("Check if the setting of sysctl vm.dirty_ratio shouldn't applied to worker nodes in the second nodepool, expected value is default value, not equal 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "vm.dirty_ratio", "56")
//Apply the tuned profile in second nodepool
exutil.By("Apply the tuned profile in second nodepool of second hosted cluster in management cluster")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", secondNodePoolName, "-n", hostedClusterNS2, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[{\"name\": \"hc-nodepool-vmdratio\"}]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check if the tuned hc-nodepool-vmdratio-xxxxxx is created in hosted cluster nodepool")
AssertIfTunedIsReadyByNameInHostedCluster(oc, "hc-nodepool-vmdratio", ntoNamespace)
exutil.By("Get the tuned pod name that running on first custom nodepool worker node")
tunedPodNameInSecondNodePool, err := exutil.GetPodNameInHostedCluster(oc, ntoNamespace, "", workerNodeNameInSecondNodepool)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedPodNameInSecondNodePool).NotTo(o.BeEmpty())
e2e.Logf("Tuned Pod: %v", tunedPodNameInSecondNodePool)
exutil.By("Check if the tuned profile applied to second nodepool worker nodes in second hosted cluster")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeNameInSecondNodepool, "hc-nodepool-vmdratio")
exutil.By("Check if the tuned profile applied to all worker node in in second hosted cluster.")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "hc-nodepool-vmdratio")
exutil.By("Assert recommended profile (hc-nodepool-vmdratio) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInSecondNodePool, "12", 300, `recommended profile \(hc-nodepool-vmdratio\) matches current configuration|static tuning from profile 'hc-nodepool-vmdratio' applied`)
exutil.By("Check if the setting of sysctl vm.dirty_ratio applied to worker nodes of default nodepool in second hosted cluster, expected value is 56")
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Remove the custom tuned profile from the nodepool in first hosted cluster ...")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", firstNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Set first kubeconfig to access first hosted cluster")
oc.SetGuestKubeconf(guestClusterKube)
//Compare the sysctl vm.dirty_ratio not equal to 56
exutil.By("Check if the setting of sysctl vm.dirty_ratio shouldn't applied to worker nodes in the first hosted cluster, expected value is default value, not equal 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Set second kubeconfig to access second hosted cluster")
oc.SetGuestKubeconf(guestClusterKube2)
exutil.By("Check if the setting of sysctl vm.dirty_ratio applied to worker nodes in the second hosted cluster, no impact with removing vm.dirty_ratio setting in first hosted cluster")
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Remove the custom tuned profile from the nodepool in second hosted cluster ...")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", secondNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Set first kubeconfig to access first hosted cluster")
oc.SetGuestKubeconf(guestClusterKube)
//Compare the sysctl vm.dirty_ratio not equal to 56
exutil.By("Check if the setting of sysctl vm.dirty_ratio shouldn't applied to worker nodes in the first hosted cluster, expected value is default value, not equal 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Set second kubeconfig to access second hosted cluster")
oc.SetGuestKubeconf(guestClusterKube2)
//Compare the sysctl vm.dirty_ratio not equal to 56
exutil.By("Check if the setting of sysctl vm.dirty_ratio shouldn't applied to worker nodes in the second hosted cluster, expected value is default value, not equal 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "vm.dirty_ratio", "56")
//Clean up all remaining resources/settings
exutil.By("Remove configmap from management cluster")
oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-vmdratio", "-n", hostedClusterNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+firstNodePoolName, "-n", guestClusterNS, "--ignore-not-found").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+secondNodePoolName, "-n", guestClusterNS2, "--ignore-not-found").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Set first kubeconfig to access first hosted cluster")
oc.SetGuestKubeconf(guestClusterKube)
exutil.By("Assert recommended profile (openshift-node) matches current configuration in tuned pod log in first hosted")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInFirstNodePool, "12", 300, `recommended profile \(openshift-node\) matches current configuration|static tuning from profile 'openshift-node' applied`)
exutil.By("Check if the custom tuned profile removed from worker nodes of nodepool in first hosted cluster, default openshift-node applied to worker node")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "openshift-node")
exutil.By("Set second kubeconfig to access second hosted cluster")
oc.SetGuestKubeconf(guestClusterKube2)
exutil.By("Assert recommended profile (openshift-node) matches current configuration in tuned pod log in second hosted")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInSecondNodePool, "12", 300, `recommended profile \(openshift-node\) matches current configuration|static tuning from profile 'openshift-node' applied`)
exutil.By("Check if the custom tuned profile removed from worker nodes of nodepool in second hosted cluster, default openshift-node applied to worker node")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "openshift-node")
})
| ||||||
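Case 53880 above drives two hosted clusters from one test by swapping the guest kubeconfig before each check. Below is a minimal sketch of that pattern, assuming the same suite helpers and the exutil.CLI SetGuestKubeconf method used above; the function name is illustrative.
// Sketch only: point the CLI at a specific hosted cluster, then run a nodepool-level check there.
func checkDirtyRatioOnHostedCluster(oc *exutil.CLI, guestKubeconfig, ntoNamespace, nodePoolName, expected string) {
	// Hosted-cluster lookups in these helpers go through the guest kubeconfig set here
	oc.SetGuestKubeconf(guestKubeconfig)
	compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, nodePoolName, "sysctl", "vm.dirty_ratio", expected)
}
For example, checkDirtyRatioOnHostedCluster(oc, guestClusterKube, ntoNamespace, firstNodePoolName, "56") and checkDirtyRatioOnHostedCluster(oc, guestClusterKube2, ntoNamespace, secondNodePoolName, "56") run the same assertion against each hosted cluster in turn.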
test case
|
openshift/openshift-tests-private
|
ea419a78-63a0-490f-8bed-c25123d07a3c
|
NonPreRelease-Longduration-HyperShiftMGMT-Author:liqcui-Medium-53883-NTO can apply different tunings to two separated hosted clusters on hypershift. [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/psap/hypernto/hypernto.go
|
g.It("NonPreRelease-Longduration-HyperShiftMGMT-Author:liqcui-Medium-53883-NTO can apply different tunings to two separated hosted clusters on hypershift. [Disruptive]", func() {
//Second Hosted Cluster
guestClusterName2, guestClusterKube2, hostedClusterNS2 = exutil.ValidHypershiftAndGetGuestKubeConf4SecondHostedCluster(oc)
e2e.Logf("%s, %s, %s", guestClusterName2, guestClusterKube2, hostedClusterNS2)
guestClusterNS2 = hostedClusterNS2 + "-" + guestClusterName2
e2e.Logf("HostedClusterControlPlaneNS: %v", guestClusterNS2)
// ensure NTO operator is installed
isNTO2 = isHyperNTOPodInstalled(oc, guestClusterNS2)
// test requires NTO to be installed
if !isNTO || !isNTO2 {
g.Skip("NTO is not installed - skipping test ...")
}
supportPlatforms := []string{"aws", "azure"}
if !implStringArrayContains(supportPlatforms, iaasPlatform) {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
//Delete configmap in clusters namespace
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-vmdratio", "-n", hostedClusterNS, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-pidmax", "-n", hostedClusterNS2, "--ignore-not-found").Execute()
//Create configmap, it will create custom tuned profile based on this configmap
exutil.By("Create configmap hc-nodepool-vmdratio in management cluster")
exutil.ApplyNsResourceFromTemplate(oc, hostedClusterNS, "--ignore-unknown-parameters=true", "-f", tunedWithNodeLevelProfileName, "-p", "TUNEDPROFILENAME=hc-nodepool-vmdratio", "SYSCTLPARM=vm.dirty_ratio", "SYSCTLVALUE=56", "PRIORITY=20", "INCLUDE=openshift-node")
configmapsInMgmtClusters, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", hostedClusterNS).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(configmapsInMgmtClusters).NotTo(o.BeEmpty())
o.Expect(configmapsInMgmtClusters).To(o.ContainSubstring("hc-nodepool-vmdratio"))
exutil.By("Create configmap hc-nodepool-pidmax in management cluster")
exutil.ApplyNsResourceFromTemplate(oc, hostedClusterNS, "--ignore-unknown-parameters=true", "-f", tunedWithNodeLevelProfileName, "-p", "TUNEDPROFILENAME=hc-nodepool-pidmax", "SYSCTLPARM=kernel.pid_max", "SYSCTLVALUE=868686", "PRIORITY=20", "INCLUDE=openshift-node")
configmapsInMgmtClusters, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", hostedClusterNS).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(configmapsInMgmtClusters).NotTo(o.BeEmpty())
o.Expect(configmapsInMgmtClusters).To(o.ContainSubstring("hc-nodepool-pidmax"))
exutil.By("Ge the default nodepool in hosted cluster as first nodepool")
firstNodePoolName = getNodePoolNamebyHostedClusterName(oc, guestClusterName, hostedClusterNS)
o.Expect(firstNodePoolName).NotTo(o.BeEmpty())
exutil.By("Ge the default nodepool in hosted cluster as second nodepool")
secondNodePoolName = getNodePoolNamebyHostedClusterName(oc, guestClusterName2, hostedClusterNS2)
o.Expect(secondNodePoolName).NotTo(o.BeEmpty())
exutil.By("Pick one worker node in default node pool of first hosted cluster")
oc.SetGuestKubeconf(guestClusterKube)
workerNodeNameInFirstNodepool, err := exutil.GetFirstWorkerNodeByNodePoolNameInHostedCluster(oc, firstNodePoolName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(workerNodeNameInFirstNodepool).NotTo(o.BeEmpty())
e2e.Logf("Worker node in nodepool in first hosted cluster: %v", workerNodeNameInFirstNodepool)
defer assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeNameInFirstNodepool, "openshift-node")
defer oc.SetGuestKubeconf(guestClusterKube)
oc.SetGuestKubeconf(guestClusterKube2)
exutil.By("Pick one worker node in default node pool of second hosted cluster")
workerNodeNameInSecondNodepool, err := exutil.GetFirstWorkerNodeByNodePoolNameInHostedCluster(oc, secondNodePoolName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(workerNodeNameInSecondNodepool).NotTo(o.BeEmpty())
e2e.Logf("Worker node in nodepool in second hosted cluster: %v", workerNodeNameInSecondNodepool)
defer assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeNameInSecondNodepool, "openshift-node")
defer oc.SetGuestKubeconf(guestClusterKube2)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+firstNodePoolName, "-n", guestClusterNS, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+secondNodePoolName, "-n", guestClusterNS2, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", firstNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", secondNodePoolName, "-n", hostedClusterNS2, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
//Apply the tuned profile in first nodepool {firstNodePoolName}
exutil.By("Apply the tuned profile in default nodepool {firstNodePoolName} in management cluster")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", firstNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[{\"name\": \"hc-nodepool-vmdratio\"}]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetGuestKubeconf(guestClusterKube)
exutil.By("Check if the configmap tuned-{firstNodePoolName} created in corresponding hosted ns in management cluster")
configMaps := getTuningConfigMapNameWithRetry(oc, guestClusterNS, "tuned-"+firstNodePoolName)
o.Expect(configMaps).NotTo(o.BeEmpty())
o.Expect(configMaps).To(o.ContainSubstring("tuned-" + firstNodePoolName))
exutil.By("Check if the tuned hc-nodepool-vmdratio-xxxxxx is created in hosted cluster nodepool")
AssertIfTunedIsReadyByNameInHostedCluster(oc, "hc-nodepool-vmdratio", ntoNamespace)
exutil.By("Get the tuned pod name that running on first custom nodepool worker node")
tunedPodNameInFirstNodePool, err := exutil.GetPodNameInHostedCluster(oc, ntoNamespace, "", workerNodeNameInFirstNodepool)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedPodNameInFirstNodePool).NotTo(o.BeEmpty())
e2e.Logf("Tuned Pod: %v", tunedPodNameInFirstNodePool)
exutil.By("Check if the tuned profile applied to first custom nodepool worker nodes")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeNameInFirstNodepool, "hc-nodepool-vmdratio")
exutil.By("Check if the tuned profile applied to all worker node in the first nodepool.")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "hc-nodepool-vmdratio")
exutil.By("Assert recommended profile (hc-nodepool-vmdratio) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInFirstNodePool, "12", 300, `recommended profile \(hc-nodepool-vmdratio\) matches current configuration|static tuning from profile 'hc-nodepool-vmdratio' applied`)
exutil.By("Check if the setting of sysctl vm.dirty_ratio applied to worker nodes in first hosted cluster, expected value is 56")
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, firstNodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Check if the setting of sysctl kernel.pid_max applies default settings on worker nodes in the first hosted cluster, expected value is default value, not equal 868686")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "sysctl", "kernel.pid_max", "868686")
//Compare the sysctl vm.dirty_ratio not equal to 56
exutil.By("Set second kubeconfig to access second hosted cluster")
oc.SetGuestKubeconf(guestClusterKube2)
exutil.By("Check if the setting of sysctl vm.dirty_ratio applies default settings on worker nodes in the second hosted cluster, expected value is default value, not equal 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Check if the setting of sysctl kernel.pid_max applies default settings on worker nodes in the second hosted cluster, expected value is default value, not equal 868686")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "kernel.pid_max", "868686")
//Apply the tuned profile in second nodepool
exutil.By("Apply the tuned profile in second nodepool of second hosted cluster in management cluster")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", secondNodePoolName, "-n", hostedClusterNS2, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[{\"name\": \"hc-nodepool-pidmax\"}]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check if the tuned hc-nodepool-pidmax-xxxxxx is created in hosted cluster nodepool")
AssertIfTunedIsReadyByNameInHostedCluster(oc, "hc-nodepool-pidmax", ntoNamespace)
exutil.By("Get the tuned pod name that running on first custom nodepool worker node")
tunedPodNameInSecondNodePool, err := exutil.GetPodNameInHostedCluster(oc, ntoNamespace, "", workerNodeNameInSecondNodepool)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedPodNameInSecondNodePool).NotTo(o.BeEmpty())
e2e.Logf("Tuned Pod: %v", tunedPodNameInSecondNodePool)
exutil.By("Check if the tuned profile applied to second nodepool worker nodes in second hosted cluster")
assertIfTunedProfileAppliedOnSpecifiedNodeInHostedCluster(oc, ntoNamespace, workerNodeNameInSecondNodepool, "hc-nodepool-pidmax")
exutil.By("Check if the tuned profile applied to all worker node in in second hosted cluster.")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "hc-nodepool-pidmax")
exutil.By("Assert recommended profile (hc-nodepool-pidmax) matches current configuration in tuned pod log")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInSecondNodePool, "12", 300, `recommended profile \(hc-nodepool-pidmax\) matches current configuration|static tuning from profile 'hc-nodepool-pidmax' applied`)
exutil.By("Check if the setting of sysctl kernel.pid_max applies on worker nodes in second hosted cluster, expected value is 868686")
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "kernel.pid_max", "868686")
exutil.By("Check if the setting of sysctl vm.dirty_ratio applies default settings on worker nodes in the second hosted cluster, expected value is default value, not equal 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Set first kubeconfig to access first hosted cluster")
oc.SetGuestKubeconf(guestClusterKube)
exutil.By("Check if the setting of sysctl kernel.pid_max applies default settings on worker nodes in the first hosted cluster, expected value is default value, not equal 868686")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "sysctl", "kernel.pid_max", "868686")
exutil.By("Remove the custom tuned profile from the nodepool in first hosted cluster ...")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", firstNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
//Compare the sysctl vm.dirty_ratio not equal to 56
exutil.By("Check if the setting of sysctl vm.dirty_ratio shouldn't apply to worker nodes in the first hosted cluster, expected value is default value, not equal 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Set second kubeconfig to access second hosted cluster")
oc.SetGuestKubeconf(guestClusterKube2)
exutil.By("Check if the setting of sysctl kernel.pid_max still apply to worker nodes in the second hosted cluster, no impact with removing vm.dirty_ratio setting in first hosted cluster")
compareSpecifiedValueByNameOnNodePoolLevelWithRetryInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "kernel.pid_max", "868686")
exutil.By("Remove the custom tuned profile from the nodepool in second hosted cluster ...")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("nodepool", secondNodePoolName, "-n", hostedClusterNS, "--type", "merge", "-p", "{\"spec\":{\"tuningConfig\":[]}}").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Set first kubeconfig to access first hosted cluster")
oc.SetGuestKubeconf(guestClusterKube)
exutil.By("All settings of vm.dirty_ratio and kernel.pid_max rollback to default settings ...")
exutil.By("Check if the setting of sysctl vm.dirty_ratio applies default settings on worker nodes in the first hosted cluster, expected value is default value, not equal 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Check if the setting of sysctl kernel.pid_max applies default settings on worker nodes in the first hosted cluster, expected value is default value, not equal 868686")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "sysctl", "kernel.pid_max", "868686")
exutil.By("Set second kubeconfig to access second hosted cluster")
oc.SetGuestKubeconf(guestClusterKube2)
exutil.By("Check if the setting of sysctl vm.dirty_ratio applies default settings on worker nodes in the second hosted cluster, expected value is default value, not equal 56")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "vm.dirty_ratio", "56")
exutil.By("Check if the setting of sysctl kernel.pid_max applies default settings on worker nodes in the second hosted cluster, expected value is default value, not equal 868686")
assertMisMatchTunedSystemSettingsByParamNameOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "sysctl", "kernel.pid_max", "868686")
//Clean up all remaining resources/settings
exutil.By("Remove configmap from management cluster")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-vmdratio", "-n", hostedClusterNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "hc-nodepool-pidmax", "-n", hostedClusterNS2).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+firstNodePoolName, "-n", guestClusterNS, "--ignore-not-found").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "tuned-"+secondNodePoolName, "-n", guestClusterNS2, "--ignore-not-found").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Set first kubeconfig to access first hosted cluster")
oc.SetGuestKubeconf(guestClusterKube)
exutil.By("Assert recommended profile (openshift-node) matches current configuration in tuned pod log in first hosted")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInFirstNodePool, "12", 300, `recommended profile \(openshift-node\) matches current configuration|static tuning from profile 'openshift-node' applied`)
exutil.By("Check if the custom tuned profile removed from worker nodes of nodepool in first hosted cluster, default openshift-node applied to worker node")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, firstNodePoolName, "openshift-node")
exutil.By("Set second kubeconfig to access second hosted cluster")
oc.SetGuestKubeconf(guestClusterKube2)
exutil.By("Assert recommended profile (openshift-node) matches current configuration in tuned pod log in second hosted")
assertNTOPodLogsLastLinesInHostedCluster(oc, ntoNamespace, tunedPodNameInSecondNodePool, "12", 300, `recommended profile \(openshift-node\) matches current configuration|static tuning from profile 'openshift-node' applied`)
exutil.By("Check if the custom tuned profile removed from worker nodes of nodepool in second hosted cluster, default openshift-node applied to worker node")
assertIfTunedProfileAppliedOnNodePoolLevelInHostedCluster(oc, ntoNamespace, secondNodePoolName, "openshift-node")
})
| ||||||
test
|
openshift/openshift-tests-private
|
117dbe82-e31d-41b5-a663-0f0afdd8ee57
|
nfd
|
import (
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/psap/nfd/nfd.go
|
package nfd
import (
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-node] PSAP should", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("nfd-test", exutil.KubeConfigPath())
apiNamespace = "openshift-machine-api"
iaasPlatform string
)
g.BeforeEach(func() {
// get IaaS platform
iaasPlatform = exutil.CheckPlatform(oc)
})
// author: [email protected]
g.It("Author:wabouham-Medium-43461-Add a new worker node on an NFD-enabled OCP cluster [Slow] [Flaky]", func() {
// currently the test is only supported on AWS, GCP, Azure, IBM Cloud, Alibaba Cloud, and OpenStack
if iaasPlatform != "aws" && iaasPlatform != "gcp" && iaasPlatform != "azure" && iaasPlatform != "ibmcloud" && iaasPlatform != "alibabacloud" && iaasPlatform != "openstack" {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
stdOut, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("packagemanifest", "nfd", "-n", "openshift-marketplace").Output()
if strings.Contains(stdOut, "NotFound") {
g.Skip("No NFD package manifest found, skipping test ...")
}
clusterVersion, _, err := exutil.GetClusterVersion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(clusterVersion).NotTo(o.BeEmpty())
nfdVersion := exutil.GetNFDVersionbyPackageManifest(oc, "openshift-marketplace")
o.Expect(nfdVersion).NotTo(o.BeEmpty())
if nfdVersion != clusterVersion {
g.Skip("The nfd version " + nfdVersion + " mismatch cluster version " + clusterVersion + " skip creating instance")
}
// test requires NFD to be installed and an instance to be running
g.By("Deploy NFD Operator and create instance on Openshift Container Platform")
nfdInstalled := isPodInstalled(oc, nfdNamespace)
isNodeLabeled := exutil.IsNodeLabeledByNFD(oc)
if nfdInstalled && isNodeLabeled {
e2e.Logf("NFD installation and node label found! Continuing with test ...")
} else {
exutil.InstallNFD(oc, nfdNamespace)
exutil.CreateNFDInstance(oc, nfdNamespace)
}
haveMachineSet := exutil.IsMachineSetExist(oc)
if haveMachineSet {
g.By("Destroy newly created machineset and node once check is complete")
defer deleteMachineSet(oc, apiNamespace, "openshift-qe-nfd-machineset")
g.By("Get current machineset instance type")
machineSetInstanceType := exutil.GetMachineSetInstanceType(oc)
o.Expect(machineSetInstanceType).NotTo(o.BeEmpty())
g.By("Create a new machineset with name openshift-qe-nfd-machineset")
exutil.CreateMachinesetbyInstanceType(oc, "openshift-qe-nfd-machineset", machineSetInstanceType)
g.By("Wait for new node is ready when machineset created")
clusterinfra.WaitForMachinesRunning(oc, 1, "openshift-qe-nfd-machineset")
g.By("Check if new created worker node's label are created")
newWorkNode := exutil.GetNodeNameByMachineset(oc, "openshift-qe-nfd-machineset")
ocGetNodeLabels, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", newWorkNode, "-ojsonpath={.metadata.labels}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(ocGetNodeLabels).NotTo(o.BeEmpty())
o.Expect(strings.Contains(ocGetNodeLabels, "feature")).To(o.BeTrue())
} else {
e2e.Logf("No machineset detected and only deploy NFD and check labels")
g.By("Check that the NFD labels are created")
firstWorkerNodeName, err := exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(firstWorkerNodeName).NotTo(o.BeEmpty())
ocGetNodeLabels, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", firstWorkerNodeName, "-ojsonpath={.metadata.labels}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(ocGetNodeLabels).NotTo(o.BeEmpty())
o.Expect(strings.Contains(ocGetNodeLabels, "feature")).To(o.BeTrue())
}
})
})
|
package nfd
| ||||
test case
|
openshift/openshift-tests-private
|
00b82f67-fcc9-403f-bb7a-44a32bdd2722
|
Author:wabouham-Medium-43461-Add a new worker node on an NFD-enabled OCP cluster [Slow] [Flaky]
|
['"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/nfd/nfd.go
|
g.It("Author:wabouham-Medium-43461-Add a new worker node on an NFD-enabled OCP cluster [Slow] [Flaky]", func() {
// currently the test is only supported on AWS, GCP, Azure, IBM Cloud, Alibaba Cloud, and OpenStack
if iaasPlatform != "aws" && iaasPlatform != "gcp" && iaasPlatform != "azure" && iaasPlatform != "ibmcloud" && iaasPlatform != "alibabacloud" && iaasPlatform != "openstack" {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
stdOut, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("packagemanifest", "nfd", "-n", "openshift-marketplace").Output()
if strings.Contains(stdOut, "NotFound") {
g.Skip("No NFD package manifest found, skipping test ...")
}
clusterVersion, _, err := exutil.GetClusterVersion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(clusterVersion).NotTo(o.BeEmpty())
nfdVersion := exutil.GetNFDVersionbyPackageManifest(oc, "openshift-marketplace")
o.Expect(nfdVersion).NotTo(o.BeEmpty())
if nfdVersion != clusterVersion {
g.Skip("The nfd version " + nfdVersion + " mismatch cluster version " + clusterVersion + " skip creating instance")
}
// test requires NFD to be installed and an instance to be running
g.By("Deploy NFD Operator and create instance on Openshift Container Platform")
nfdInstalled := isPodInstalled(oc, nfdNamespace)
isNodeLabeled := exutil.IsNodeLabeledByNFD(oc)
if nfdInstalled && isNodeLabeled {
e2e.Logf("NFD installation and node label found! Continuing with test ...")
} else {
exutil.InstallNFD(oc, nfdNamespace)
exutil.CreateNFDInstance(oc, nfdNamespace)
}
haveMachineSet := exutil.IsMachineSetExist(oc)
if haveMachineSet {
g.By("Destroy newly created machineset and node once check is complete")
defer deleteMachineSet(oc, apiNamespace, "openshift-qe-nfd-machineset")
g.By("Get current machineset instance type")
machineSetInstanceType := exutil.GetMachineSetInstanceType(oc)
o.Expect(machineSetInstanceType).NotTo(o.BeEmpty())
g.By("Create a new machineset with name openshift-qe-nfd-machineset")
exutil.CreateMachinesetbyInstanceType(oc, "openshift-qe-nfd-machineset", machineSetInstanceType)
g.By("Wait for new node is ready when machineset created")
clusterinfra.WaitForMachinesRunning(oc, 1, "openshift-qe-nfd-machineset")
g.By("Check if new created worker node's label are created")
newWorkNode := exutil.GetNodeNameByMachineset(oc, "openshift-qe-nfd-machineset")
ocGetNodeLabels, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", newWorkNode, "-ojsonpath={.metadata.labels}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(ocGetNodeLabels).NotTo(o.BeEmpty())
o.Expect(strings.Contains(ocGetNodeLabels, "feature")).To(o.BeTrue())
} else {
e2e.Logf("No machineset detected and only deploy NFD and check labels")
g.By("Check that the NFD labels are created")
firstWorkerNodeName, err := exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(firstWorkerNodeName).NotTo(o.BeEmpty())
ocGetNodeLabels, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", firstWorkerNodeName, "-ojsonpath={.metadata.labels}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(ocGetNodeLabels).NotTo(o.BeEmpty())
o.Expect(strings.Contains(ocGetNodeLabels, "feature")).To(o.BeTrue())
}
})
| |||||
file
|
openshift/openshift-tests-private
|
6cbccd4c-65b0-4185-a6db-8e5a704acecd
|
nfd_util
|
import (
"context"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/psap/nfd/nfd_util.go
|
package nfd
import (
"context"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var (
nfdNamespace = "openshift-nfd"
nfd_namespace_file = exutil.FixturePath("testdata", "psap", "nfd", "nfd-namespace.yaml")
nfd_operatorgroup_file = exutil.FixturePath("testdata", "psap", "nfd", "nfd-operatorgroup.yaml")
nfd_sub_file = exutil.FixturePath("testdata", "psap", "nfd", "nfd-sub.yaml")
nfd_instance_file = exutil.FixturePath("testdata", "psap", "nfd", "nfd-instance.yaml")
)
// isPodInstalled will return true if any pod is found in the given namespace, and false otherwise
func isPodInstalled(oc *exutil.CLI, namespace string) bool {
e2e.Logf("Checking if pod is found in namespace %s...", namespace)
podList, err := oc.AdminKubeClient().CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
if len(podList.Items) == 0 {
e2e.Logf("No pod found in namespace %s :(", namespace)
return false
}
e2e.Logf("Pod found in namespace %s!", namespace)
return true
}
// createYAMLFromMachineSet creates a YAML file with a given filename from a given machineset name in a given namespace, throws an error if creation fails
func createYAMLFromMachineSet(oc *exutil.CLI, namespace string, machineSetName string, filename string) (string, error) {
return oc.AsAdmin().WithoutNamespace().Run("get").Args(exutil.MapiMachineset, "-n", namespace, machineSetName, "-o", "yaml").OutputToFile(filename)
}
// createMachineSetFromYAML creates a new machineset from the YAML configuration in a given filename, throws an error if creation fails
func createMachineSetFromYAML(oc *exutil.CLI, filename string) error {
return oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", filename).Execute()
}
// deleteMachineSet will delete a given machineset name from a given namespace
func deleteMachineSet(oc *exutil.CLI, namespace string, machineSetName string) error {
return oc.AsAdmin().WithoutNamespace().Run("delete").Args(exutil.MapiMachineset, machineSetName, "-n", namespace).Execute()
}
|
package nfd
| ||||
function
|
openshift/openshift-tests-private
|
5beae9ea-432e-4336-9160-85d29aa4d3e8
|
isPodInstalled
|
['"context"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/nfd/nfd_util.go
|
func isPodInstalled(oc *exutil.CLI, namespace string) bool {
e2e.Logf("Checking if pod is found in namespace %s...", namespace)
podList, err := oc.AdminKubeClient().CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
if len(podList.Items) == 0 {
e2e.Logf("No pod found in namespace %s :(", namespace)
return false
}
e2e.Logf("Pod found in namespace %s!", namespace)
return true
}
|
nfd
| ||||
function
|
openshift/openshift-tests-private
|
0585e348-9f91-4ed9-aa90-52006b3b3e49
|
createYAMLFromMachineSet
|
github.com/openshift/openshift-tests-private/test/extended/psap/nfd/nfd_util.go
|
func createYAMLFromMachineSet(oc *exutil.CLI, namespace string, machineSetName string, filename string) (string, error) {
return oc.AsAdmin().WithoutNamespace().Run("get").Args(exutil.MapiMachineset, "-n", namespace, machineSetName, "-o", "yaml").OutputToFile(filename)
}
|
nfd
| |||||
function
|
openshift/openshift-tests-private
|
fbec8698-4afc-489a-bd32-aa0568219394
|
createMachineSetFromYAML
|
github.com/openshift/openshift-tests-private/test/extended/psap/nfd/nfd_util.go
|
func createMachineSetFromYAML(oc *exutil.CLI, filename string) error {
return oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", filename).Execute()
}
|
nfd
| |||||
function
|
openshift/openshift-tests-private
|
deac4ddb-910a-42df-a975-3345e58824b2
|
deleteMachineSet
|
github.com/openshift/openshift-tests-private/test/extended/psap/nfd/nfd_util.go
|
func deleteMachineSet(oc *exutil.CLI, namespace string, machineSetName string) error {
return oc.AsAdmin().WithoutNamespace().Run("delete").Args(exutil.MapiMachineset, machineSetName, "-n", namespace).Execute()
}
|
nfd
| |||||
test
|
openshift/openshift-tests-private
|
82d5e045-92ca-4211-8420-d8e381cfd089
|
nto
|
import (
"fmt"
"regexp"
"strconv"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
package nto
import (
"fmt"
"regexp"
"strconv"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-tuning-node] PSAP should", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("nto-test", exutil.KubeConfigPath())
ntoNamespace = "openshift-cluster-node-tuning-operator"
overrideFile = exutil.FixturePath("testdata", "psap", "nto", "override.yaml")
podTestFile = exutil.FixturePath("testdata", "psap", "nto", "pod_test.yaml")
podNginxFile = exutil.FixturePath("testdata", "psap", "nto", "pod-nginx.yaml")
tunedNFConntrackMaxFile = exutil.FixturePath("testdata", "psap", "nto", "tuned-nf-conntrack-max.yaml")
hPPerformanceProfileFile = exutil.FixturePath("testdata", "psap", "nto", "hp-performanceprofile.yaml")
hpPerformanceProfilePatchFile = exutil.FixturePath("testdata", "psap", "nto", "hp-performanceprofile-patch.yaml")
cgroupSchedulerBacklist = exutil.FixturePath("testdata", "psap", "nto", "cgroup-scheduler-blacklist.yaml")
cgroupSchedulerBestEffortPod = exutil.FixturePath("testdata", "psap", "nto", "cgroup-scheduler-besteffor-pod.yaml")
ntoTunedDebugFile = exutil.FixturePath("testdata", "psap", "nto", "nto-tuned-debug.yaml")
ntoIRQSMPFile = exutil.FixturePath("testdata", "psap", "nto", "default-irq-smp-affinity.yaml")
ntoRealtimeFile = exutil.FixturePath("testdata", "psap", "nto", "realtime.yaml")
ntoMCPFile = exutil.FixturePath("testdata", "psap", "nto", "machine-config-pool.yaml")
IPSFile = exutil.FixturePath("testdata", "psap", "nto", "ips.yaml")
workerStackFile = exutil.FixturePath("testdata", "psap", "nto", "worker-stack-tuned.yaml")
paoPerformanceFile = exutil.FixturePath("testdata", "psap", "pao", "pao-performanceprofile.yaml")
paoPerformancePatchFile = exutil.FixturePath("testdata", "psap", "pao", "pao-performance-patch.yaml")
paoPerformanceFixpatchFile = exutil.FixturePath("testdata", "psap", "pao", "pao-performance-fixpatch.yaml")
paoPerformanceOptimizeFile = exutil.FixturePath("testdata", "psap", "pao", "pao-performance-optimize.yaml")
paoIncludePerformanceProfile = exutil.FixturePath("testdata", "psap", "pao", "pao-include-performance-profile.yaml")
paoWorkerCnfMCPFile = exutil.FixturePath("testdata", "psap", "pao", "pao-workercnf-mcp.yaml")
paoWorkerOptimizeMCPFile = exutil.FixturePath("testdata", "psap", "pao", "pao-workeroptimize-mcp.yaml")
hugepage100MPodFile = exutil.FixturePath("testdata", "psap", "nto", "hugepage-100m-pod.yaml")
hugepageMCPfile = exutil.FixturePath("testdata", "psap", "nto", "hugepage-mcp.yaml")
hugepageTunedBoottimeFile = exutil.FixturePath("testdata", "psap", "nto", "hugepage-tuned-boottime.yaml")
stalldTunedFile = exutil.FixturePath("testdata", "psap", "nto", "stalld-tuned.yaml")
openshiftNodePostgresqlFile = exutil.FixturePath("testdata", "psap", "nto", "openshift-node-postgresql.yaml")
netPluginFile = exutil.FixturePath("testdata", "psap", "nto", "net-plugin-tuned.yaml")
cloudProviderFile = exutil.FixturePath("testdata", "psap", "nto", "cloud-provider-profile.yaml")
nodeDiffCPUsTunedBootFile = exutil.FixturePath("testdata", "psap", "nto", "node-diffcpus-tuned-bootloader.yaml")
nodeDiffCPUsMCPFile = exutil.FixturePath("testdata", "psap", "nto", "node-diffcpus-mcp.yaml")
tuningMaxPidFile = exutil.FixturePath("testdata", "psap", "nto", "tuning-maxpid.yaml")
isNTO bool
isPAOInstalled bool
paoNamespace = "openshift-performance-addon-operator"
iaasPlatform string
ManualPickup bool
podShippedFile string
podSysctlFile string
ntoTunedPidMax string
customTunedProfile string
tunedNodeName string
ntoSysctlTemplate string
ntoDefered string
ntoDeferedUpdatePatch string
err error
)
g.BeforeEach(func() {
// ensure NTO operator is installed
isNTO = isNTOPodInstalled(oc, ntoNamespace)
// get IaaS platform
iaasPlatform = exutil.CheckPlatform(oc)
e2e.Logf("Cloud provider is: %v", iaasPlatform)
ManualPickup = false
podShippedFile = exutil.FixturePath("testdata", "psap", "nto", "pod-shipped.yaml")
podSysctlFile = exutil.FixturePath("testdata", "psap", "nto", "nto-sysctl-pod.yaml")
ntoTunedPidMax = exutil.FixturePath("testdata", "psap", "nto", "nto-tuned-pidmax.yaml")
customTunedProfile = exutil.FixturePath("testdata", "psap", "nto", "custom-tuned-profiles.yaml")
ntoSysctlTemplate = exutil.FixturePath("testdata", "psap", "nto", "nto-sysctl-template.yaml")
ntoDefered = exutil.FixturePath("testdata", "psap", "nto", "deferred-nto.yaml")
ntoDeferedUpdatePatch = exutil.FixturePath("testdata", "psap", "nto", "deferred-nto-update-patch.yaml")
})
// author: [email protected]
g.It("ROSA-OSD_CCS-NonHyperShiftHOST-Author:liqcui-Medium-29789-Sysctl parameters that set by tuned can be overwritten by parameters set via /etc/sysctl [Flaky]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
exutil.By("Pick one worker node and one tuned pod on same node")
workerNodeName, err := exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(workerNodeName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Worker Node: %v", workerNodeName)
tunedPodName, err := exutil.GetPodName(oc, ntoNamespace, "openshift-app=tuned", workerNodeName)
o.Expect(tunedPodName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Tuned Pod: %v", tunedPodName)
exutil.By("Check values set by /etc/sysctl on node and store the values")
inotify, _, err := exutil.DebugNodeWithOptionsAndChrootWithoutRecoverNsLabel(oc, workerNodeName, []string{"-q"}, "cat", "/etc/sysctl.d/inotify.conf")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(inotify).To(o.And(
o.ContainSubstring("fs.inotify.max_user_watches"),
o.ContainSubstring("fs.inotify.max_user_instances")))
maxUserWatchesValue := getMaxUserWatchesValue(inotify)
maxUserInstancesValue := getMaxUserInstancesValue(inotify)
e2e.Logf("fs.inotify.max_user_watches has value of: %v", maxUserWatchesValue)
e2e.Logf("fs.inotify.max_user_instances has value of: %v", maxUserInstancesValue)
exutil.By("Mount /etc/sysctl on node")
_, err = exutil.RemoteShPod(oc, ntoNamespace, tunedPodName, "mount")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check sysctl kernel.pid_max on node and store the value")
kernel, _, err := exutil.DebugNodeWithOptionsAndChrootWithoutRecoverNsLabel(oc, workerNodeName, []string{"-q"}, "sysctl", "kernel.pid_max")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(kernel).To(o.ContainSubstring("kernel.pid_max"))
pidMaxValue := getKernelPidMaxValue(kernel)
e2e.Logf("kernel.pid_max has value of: %v", pidMaxValue)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", ntoNamespace, "tuneds.tuned.openshift.io", "override").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", workerNodeName, "tuned.openshift.io/override-").Execute()
//tuned cannot override parameters set via /etc/sysctl{.conf,.d} when reapply_sysctl=true
// The settings in /etc/sysctl.d/inotify.conf are as below:
// fs.inotify.max_user_watches = 65536 => tuned tries to override it to 163840, expect the old value 65536
// fs.inotify.max_user_instances = 8192 => not overridden by tuned, expect the old value 8192
// kernel.pid_max = 4194304 => default value is 4194304
// The settings in the custom tuned profile are as below:
// fs.inotify.max_user_watches = 163840 => tuned tries to override it to 163840, expect the old value 65536
// kernel.pid_max = 1048576 => overridden by tuned, expect the new value 1048576
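// For illustration only (the override.yaml fixture is not shown here): the template is assumed
// to render a Tuned CR whose profile carries roughly
// [sysctl]
// fs.inotify.max_user_watches=163840
// kernel.pid_max=1048576
// plus a recommend rule keyed on the tuned.openshift.io/override node label, and the
// REAPPLY_SYSCTL parameter is assumed to toggle the tuned daemon's reapply_sysctl option.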
exutil.By("Create new NTO CR with reapply_sysctl=true and label the node")
//reapply_sysctl=true tuned can not override parameters set via /etc/sysctl{.conf,.d}
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", workerNodeName, "tuned.openshift.io/override=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.ApplyNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", overrideFile, "REAPPLY_SYSCTL=true")
exutil.By("Check if new NTO profile was applied")
assertIfTunedProfileApplied(oc, ntoNamespace, workerNodeName, "override")
exutil.By("Check value of fs.inotify.max_user_instances on node (set by sysctl, should be the same as before), expected value is 8192")
maxUserInstanceCheck, _, err := exutil.DebugNodeWithOptionsAndChrootWithoutRecoverNsLabel(oc, workerNodeName, []string{"-q"}, "sysctl", "fs.inotify.max_user_instances")
e2e.Logf("fs.inotify.max_user_instances has value of: %v", maxUserInstanceCheck)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(maxUserInstanceCheck).To(o.ContainSubstring(maxUserInstancesValue))
exutil.By("Check value of fs.inotify.max_user_watches on node (set by sysctl, should be the same as before),expected value is 65536")
maxUserWatchesCheck, _, err := exutil.DebugNodeWithOptionsAndChrootWithoutRecoverNsLabel(oc, workerNodeName, []string{"-q"}, "sysctl", "fs.inotify.max_user_watches")
e2e.Logf("fs.inotify.max_user_watches has value of: %v", maxUserWatchesCheck)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(maxUserWatchesCheck).To(o.ContainSubstring(maxUserWatchesValue))
exutil.By("Check value of kernel.pid_max on node (set by override tuned, should be the same value of override custom profile), expected value is 1048576")
pidMaxCheck, _, err := exutil.DebugNodeWithOptionsAndChrootWithoutRecoverNsLabel(oc, workerNodeName, []string{"-q"}, "sysctl", "kernel.pid_max")
e2e.Logf("kernel.pid_max has value of: %v", pidMaxCheck)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(pidMaxCheck).To(o.ContainSubstring("kernel.pid_max = 1048576"))
//tuned can override parameters set via /etc/sysctl{.conf,.d} when reapply_sysctl=false
// The settings in /etc/sysctl.d/inotify.conf are as below:
// fs.inotify.max_user_watches = 65536 => tuned overrides it to 163840, expect the new value 163840
// fs.inotify.max_user_instances = 8192 => not overridden by tuned, expect the old value 8192
// kernel.pid_max = 4194304 => default value is 4194304
// The settings in the custom tuned profile are as below:
// fs.inotify.max_user_watches = 163840 => tuned overrides it to 163840, expect the new value 163840
// kernel.pid_max = 1048576 => overridden by tuned, expect the new value 1048576
exutil.By("Create new CR with reapply_sysctl=true")
//reapply_sysctl=true tuned can not override parameters set via /etc/sysctl{.conf,.d}
exutil.ApplyNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", overrideFile, "REAPPLY_SYSCTL=false")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check value of fs.inotify.max_user_instances on node (set by sysctl, should be the same as before),expected value is 8192")
maxUserInstanceCheck, _, err = exutil.DebugNodeWithOptionsAndChrootWithoutRecoverNsLabel(oc, workerNodeName, []string{"-q"}, "sysctl", "fs.inotify.max_user_instances")
e2e.Logf("fs.inotify.max_user_instances has value of: %v", maxUserInstanceCheck)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(maxUserInstanceCheck).To(o.ContainSubstring(maxUserInstancesValue))
exutil.By("Check value of fs.inotify.max_user_watches on node (set by sysctl, should be the same value of override custom profile), expected value is 163840")
maxUserWatchesCheck, _, err = exutil.DebugNodeWithOptionsAndChrootWithoutRecoverNsLabel(oc, workerNodeName, []string{"-q"}, "sysctl", "fs.inotify.max_user_watches")
e2e.Logf("fs.inotify.max_user_watches has value of: %v", maxUserWatchesCheck)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(maxUserWatchesCheck).To(o.ContainSubstring("fs.inotify.max_user_watches = 163840"))
exutil.By("Check value of kernel.pid_max on node (set by override tuned, should be the same value of override custom profile), expected value is 1048576")
pidMaxCheck, _, err = exutil.DebugNodeWithOptionsAndChrootWithoutRecoverNsLabel(oc, workerNodeName, []string{"-q"}, "sysctl", "kernel.pid_max")
e2e.Logf("kernel.pid_max has value of: %v", pidMaxCheck)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(pidMaxCheck).To(o.ContainSubstring("kernel.pid_max = 1048576"))
})
// author: [email protected]
g.It("ROSA-OSD_CCS-NonHyperShiftHOST-Author:liqcui-Medium-33237-Test NTO support for operatorapi Unmanaged state [Flaky]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
defer func() {
exutil.By("Remove custom profile (if not already removed) and patch default tuned back to Managed")
_ = oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", ntoNamespace, "tuned", "nf-conntrack-max", "--ignore-not-found").Execute()
_ = patchTunedState(oc, ntoNamespace, "default", "Managed")
}()
isSNO := exutil.IsSNOCluster(oc)
is3Master := exutil.Is3MasterNoDedicatedWorkerNode(oc)
var profileCheck string
masterNodeName := getFirstMasterNodeName(oc)
defaultMasterProfileName := getDefaultProfileNameOnMaster(oc, masterNodeName)
exutil.By("Create logging namespace")
oc.SetupProject()
loggingNamespace := oc.Namespace()
exutil.By("Patch default tuned to 'Unmanaged'")
err := patchTunedState(oc, ntoNamespace, "default", "Unmanaged")
o.Expect(err).NotTo(o.HaveOccurred())
state, err := getTunedState(oc, ntoNamespace, "default")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(state).To(o.Equal("Unmanaged"))
exutil.By("Create new pod from CR and label it")
exutil.CreateNsResourceFromTemplate(oc, loggingNamespace, "--ignore-unknown-parameters=true", "-f", podTestFile)
err = exutil.LabelPod(oc, loggingNamespace, "web", "tuned.openshift.io/elasticsearch=")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Wait for pod web is ready")
exutil.AssertPodToBeReady(oc, "web", loggingNamespace)
exutil.By("Get the tuned node and pod names")
tunedNodeName, err := exutil.GetPodNodeName(oc, loggingNamespace, "web")
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Tuned Node: %v", tunedNodeName)
tunedPodName, err := exutil.GetPodName(oc, ntoNamespace, "openshift-app=tuned", tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Tuned Pod: %v", tunedPodName)
exutil.By("Create new profile from CR")
exutil.CreateNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", tunedNFConntrackMaxFile)
exutil.By("All node's current profile is:")
stdOut, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Profile Name Per Nodes: %v", stdOut)
logsCheck, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", ntoNamespace, "--tail=9", tunedPodName).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(logsCheck).NotTo(o.ContainSubstring("nf-conntrack-max"))
if isSNO || is3Master {
profileCheck, err = getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profileCheck).To(o.Equal(defaultMasterProfileName))
} else {
profileCheck, err = getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profileCheck).To(o.Equal("openshift-node"))
}
nodeList, err := exutil.GetAllNodesbyOSType(oc, "linux")
o.Expect(err).NotTo(o.HaveOccurred())
nodeListSize := len(nodeList)
for i := 0; i < nodeListSize; i++ {
output, err := exutil.DebugNodeWithChroot(oc, nodeList[i], "sysctl", "net.netfilter.nf_conntrack_max")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("net.netfilter.nf_conntrack_max = 1048576"))
}
exutil.By("Remove custom profile and pod and patch default tuned back to Managed")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", ntoNamespace, "tuned", "nf-conntrack-max").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", loggingNamespace, "pod", "web").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = patchTunedState(oc, ntoNamespace, "default", "Managed")
o.Expect(err).NotTo(o.HaveOccurred())
state, err = getTunedState(oc, ntoNamespace, "default")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(state).To(o.Equal("Managed"))
exutil.By("Create new pod from CR and label it")
exutil.CreateNsResourceFromTemplate(oc, loggingNamespace, "--ignore-unknown-parameters=true", "-f", podTestFile)
err = exutil.LabelPod(oc, loggingNamespace, "web", "tuned.openshift.io/elasticsearch=")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Get the tuned node and pod names")
tunedNodeName, err = exutil.GetPodNodeName(oc, loggingNamespace, "web")
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Tuned Node: %v", tunedNodeName)
tunedPodName, err = exutil.GetPodName(oc, ntoNamespace, "openshift-app=tuned", tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Tuned Pod: %v", tunedPodName)
exutil.By("Create new profile from CR")
exutil.CreateNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", tunedNFConntrackMaxFile)
exutil.By("All node's current profile is:")
stdOut, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Profile Name Per Nodes: %v", stdOut)
exutil.By("Assert nf-conntrack-max applied to the node that web application run on it.")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "nf-conntrack-max")
profileCheck, err = getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profileCheck).To(o.Equal("nf-conntrack-max"))
exutil.By("All node's current profile is:")
stdOut, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Profile Name Per Nodes: %v", stdOut)
// tuned nodes should have value of 1048578, others should be 1048576
for i := 0; i < nodeListSize; i++ {
output, err := exutil.DebugNodeWithChroot(oc, nodeList[i], "sysctl", "net.netfilter.nf_conntrack_max")
o.Expect(err).NotTo(o.HaveOccurred())
if nodeList[i] == tunedNodeName {
o.Expect(output).To(o.ContainSubstring("net.netfilter.nf_conntrack_max = 1048578"))
} else {
o.Expect(output).To(o.ContainSubstring("net.netfilter.nf_conntrack_max = 1048576"))
}
}
exutil.By("Change tuned state back to Unmanaged and delete custom tuned")
err = patchTunedState(oc, ntoNamespace, "default", "Unmanaged")
o.Expect(err).NotTo(o.HaveOccurred())
state, err = getTunedState(oc, ntoNamespace, "default")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(state).To(o.Equal("Unmanaged"))
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", ntoNamespace, "tuned", "nf-conntrack-max").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
profileCheck, err = getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profileCheck).To(o.Equal("nf-conntrack-max"))
exutil.By("Assert the log contains recommended profile (nf-conntrack-max) matches current configuratio ")
assertNTOPodLogsLastLines(oc, ntoNamespace, tunedPodName, "20", 180, `recommended profile \(nf-conntrack-max\) matches current configuration|static tuning from profile 'nf-conntrack-max' applied`)
exutil.By("All node's current profile is:")
stdOut, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Profile Name Per Nodes: %v", stdOut)
// tuned nodes should have value of 1048578, others should be 1048576
for i := 0; i < nodeListSize; i++ {
output, err := exutil.DebugNodeWithChroot(oc, nodeList[i], "sysctl", "net.netfilter.nf_conntrack_max")
o.Expect(err).NotTo(o.HaveOccurred())
if nodeList[i] == tunedNodeName {
o.Expect(output).To(o.ContainSubstring("net.netfilter.nf_conntrack_max = 1048578"))
} else {
o.Expect(output).To(o.ContainSubstring("net.netfilter.nf_conntrack_max = 1048576"))
}
}
exutil.By("Changed tuned state back to Managed")
err = patchTunedState(oc, ntoNamespace, "default", "Managed")
o.Expect(err).NotTo(o.HaveOccurred())
state, err = getTunedState(oc, ntoNamespace, "default")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(state).To(o.Equal("Managed"))
if isSNO || is3Master {
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, defaultMasterProfileName)
profileCheck, err = getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profileCheck).To(o.Equal(defaultMasterProfileName))
} else {
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "openshift-node")
profileCheck, err = getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profileCheck).To(o.Equal("openshift-node"))
}
exutil.By("All node's current profile is:")
stdOut, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Profile Name Per Nodes: %v", stdOut)
for i := 0; i < nodeListSize; i++ {
output, err := exutil.DebugNodeWithChroot(oc, nodeList[i], "sysctl", "net.netfilter.nf_conntrack_max")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("net.netfilter.nf_conntrack_max = 1048576"))
}
})
// author: [email protected]
g.It("Longduration-NonPreRelease-Author:liqcui-Medium-36881-Node Tuning Operator will provide machine config for the master machine config pool [Disruptive] [Slow]", func() {
// test requires NTO to be installed
isSNO := exutil.IsSNOCluster(oc)
isOneMasterwithNWorker := exutil.IsOneMasterWithNWorkerNodes(oc)
if !isNTO || isSNO || isOneMasterwithNWorker {
g.Skip("NTO is not installed or is Single Node Cluster- skipping test ...")
}
if ManualPickup {
g.Skip("This is the test case that execute mannually in shared cluster ...")
}
defer func() {
exutil.By("Remove new tuning profile after test completion")
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", ntoNamespace, "tuneds.tuned.openshift.io", "openshift-node-performance-hp-performanceprofile").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}()
exutil.By("Add new tuning profile from CR")
exutil.CreateNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", hPPerformanceProfileFile)
exutil.By("Verify new tuned profile was created")
profiles, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("tuned", "-n", ntoNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profiles).To(o.ContainSubstring("openshift-node-performance-hp-performanceprofile"))
exutil.By("Get NTO pod name and check logs for priority warning")
ntoPodName, err := getNTOPodName(oc, ntoNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("NTO pod name: %v", ntoPodName)
//ntoPodLogs, err := exutil.GetSpecificPodLogs(oc, ntoNamespace, "", ntoPodName, "")
assertNTOPodLogsLastLines(oc, ntoNamespace, ntoPodName, "10", 180, `openshift-node-performance-hp-performanceprofile have the same priority 30.*please use a different priority for your custom profiles`)
//o.Expect(err).NotTo(o.HaveOccurred())
//o.Expect(ntoPodLogs).To(o.ContainSubstring("profiles openshift-control-plane/openshift-node-performance-hp-performanceprofile have the same priority 30, please use a different priority for your custom profiles!"))
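// Per NTO convention, a lower numeric priority wins when profiles are recommended for the same
// node; patching the custom profile from 30 to 18 below both clears the duplicate-priority
// warning and lets it take precedence over the default openshift-control-plane profile (priority 30).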
exutil.By("Patch priority for openshift-node-performance-hp-performanceprofile tuned to 18")
err = patchTunedProfile(oc, ntoNamespace, "openshift-node-performance-hp-performanceprofile", hpPerformanceProfilePatchFile)
o.Expect(err).NotTo(o.HaveOccurred())
tunedPriority, err := getTunedPriority(oc, ntoNamespace, "openshift-node-performance-hp-performanceprofile")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedPriority).To(o.Equal("18"))
exutil.By("Check Nodes for expected changes")
masterNodeName := assertIfNodeSchedulingDisabled(oc)
e2e.Logf("The master node %v has been rebooted", masterNodeName)
exutil.By("Check MachineConfigPool for expected changes")
exutil.AssertIfMCPChangesAppliedByName(oc, "master", 1800)
exutil.By("Ensure the settings took effect on the master nodes, only check the first rebooted nodes")
assertIfMasterNodeChangesApplied(oc, masterNodeName)
exutil.By("Check MachineConfig kernel arguments for expected changes")
mcCheck, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("mc").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(mcCheck).To(o.ContainSubstring("50-nto-master"))
mcKernelArgCheck, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args("mc/50-nto-master").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(mcKernelArgCheck).To(o.ContainSubstring("default_hugepagesz=2M"))
})
g.It("ROSA-OSD_CCS-NonHyperShiftHOST-Author:liqcui-Medium-23959-Test NTO for remove pod in daemon mode [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
ntoRes := ntoResource{
name: "kernel-pid-max",
namespace: ntoNamespace,
template: customTunedProfile,
sysctlparm: "kernel.pid_max",
sysctlvalue: "128888",
}
defer func() {
exutil.By("Remove custom profile (if not already removed) and patch default tuned back to Managed")
ntoRes.delete(oc)
_ = patchTunedState(oc, ntoNamespace, "default", "Managed")
}()
isSNO := exutil.IsSNOCluster(oc)
if !isSNO {
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
} else {
tunedNodeName, err = exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
}
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
defer func() {
exutil.By("Forcily delete labeled pod on first worker node after test case executed in case compareSysctlDifferentFromSpecifiedValueByName step failure")
oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", tunedPodName, "-n", ntoNamespace, "--ignore-not-found").Execute()
}()
exutil.By("Apply new profile from CR")
ntoRes.createTunedProfileIfNotExist(oc)
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check all nodes for kernel.pid_max value, all node should different from 128888")
compareSysctlDifferentFromSpecifiedValueByName(oc, "kernel.pid_max", "128888")
exutil.By("Label tuned pod as tuned.openshift.io/elasticsearch=")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("pod", tunedPodName, "-n", ntoNamespace, "tuned.openshift.io/elasticsearch=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check if customized tuned profile applied on target node")
ntoRes.assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "kernel-pid-max", "True")
exutil.By("Compare if the value kernel.pid_max in on node with labeled pod, should be 128888")
compareSysctlValueOnSepcifiedNodeByName(oc, tunedNodeName, "kernel.pid_max", "", "128888")
exutil.By("Delete labeled tuned pod by name")
oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", tunedPodName, "-n", ntoNamespace).Execute()
exutil.By("Check all nodes for kernel.pid_max value, all node should different from 128888")
compareSysctlDifferentFromSpecifiedValueByName(oc, "kernel.pid_max", "128888")
})
g.It("ROSA-OSD_CCS-NonHyperShiftHOST-Author:liqcui-Medium-23958-Test NTO for label pod in daemon mode [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
ntoRes := ntoResource{
name: "user-max-ipc-namespaces",
namespace: ntoNamespace,
template: customTunedProfile,
sysctlparm: "user.max_ipc_namespaces",
sysctlvalue: "121112",
}
defer func() {
exutil.By("Remove custom profile (if not already removed) and patch default tuned back to Managed")
ntoRes.delete(oc)
}()
isSNO := exutil.IsSNOCluster(oc)
//Prefer worker nodes backed by a machineset
if !isSNO {
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
} else {
tunedNodeName, err = exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
}
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
defer func() {
exutil.By("Forcily remove label from the pod on first worker node in case compareSysctlDifferentFromSpecifiedValueByName step failure")
err = exutil.LabelPod(oc, ntoNamespace, tunedPodName, "tuned.openshift.io/elasticsearch-")
}()
exutil.By("Apply new profile from CR")
ntoRes.createTunedProfileIfNotExist(oc)
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check all nodes for user.max_ipc_namespaces value, all node should different from 121112")
compareSysctlDifferentFromSpecifiedValueByName(oc, "user.max_ipc_namespaces", "121112")
exutil.By("Label tuned pod as tuned.openshift.io/elasticsearch=")
err = exutil.LabelPod(oc, ntoNamespace, tunedPodName, "tuned.openshift.io/elasticsearch=")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check current profile for each node")
ntoRes.assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "user-max-ipc-namespaces", "True")
exutil.By("Compare if the value user.max_ipc_namespaces in on node with labeled pod, should be 121112")
compareSysctlValueOnSepcifiedNodeByName(oc, tunedNodeName, "user.max_ipc_namespaces", "", "121112")
exutil.By("Remove label from tuned pod as tuned.openshift.io/elasticsearch-")
err = exutil.LabelPod(oc, ntoNamespace, tunedPodName, "tuned.openshift.io/elasticsearch-")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check all nodes for user.max_ipc_namespaces value, all node should different from 121112")
compareSysctlDifferentFromSpecifiedValueByName(oc, "user.max_ipc_namespaces", "121112")
})
g.It("ROSA-OSD_CCS-NonHyperShiftHOST-Author:liqcui-Medium-43173-NTO Cgroup Blacklist Pod should affine to default cpuset.[Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
isSNO := exutil.IsSNOCluster(oc)
//Prefer worker nodes backed by a machineset
if !isSNO {
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
} else {
tunedNodeName, err = exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
}
//Get how many cpu cores are on the specified worker node
exutil.By("Get how many cpu cores are on the labeled worker node")
nodeCPUCores, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.status.capacity.cpu}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeCPUCores).NotTo(o.BeEmpty())
nodeCPUCoresInt, err := strconv.Atoi(nodeCPUCores)
o.Expect(err).NotTo(o.HaveOccurred())
if nodeCPUCoresInt <= 1 {
g.Skip("the worker node don't have enough cpus - skipping test ...")
}
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
o.Expect(tunedPodName).NotTo(o.BeEmpty())
exutil.By("Remove custom profile (if not already removed) and remove node label")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "-n", ntoNamespace, "cgroup-scheduler-affinecpuset").Execute()
defer func() {
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "tuned-scheduler-node-").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}()
exutil.By("Label the specified linux node with label tuned-scheduler-node")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "tuned-scheduler-node=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// setting cgroup_ps_blacklist=/kubepods\.slice/
// processes that belong to /kubepods\.slice/ can consume the full cpuset
// The expected Cpus_allowed_list in /proc/$PID/status should be 0-N
// processes that don't belong to /kubepods\.slice/ cannot consume the full cpuset
// The expected Cpus_allowed_list in /proc/$PID/status should be 0 or 0,2-N
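// Illustration (assumed 8-CPU node): a process under /kubepods.slice/ would be expected to show
// "Cpus_allowed_list: 0-7" in /proc/<PID>/status, while a non-blacklisted host service such as
// chronyd may show a reduced set like "0" or "0,2-7" once the scheduler plugin applies the affinity.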
exutil.By("Create NTO custom tuned profile cgroup-scheduler-affinecpuset")
exutil.ApplyNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", cgroupSchedulerBacklist, "-p", "PROFILE_NAME=cgroup-scheduler-affinecpuset", `CGROUP_BLACKLIST=/kubepods\.slice/`)
exutil.By("Check if NTO custom tuned profile cgroup-scheduler-affinecpuset was applied")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "cgroup-scheduler-affinecpuset")
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
// The expected Cpus_allowed_list in /proc/$PID/status should be 0-N
exutil.By("Verified the cpu allow list in cgroup black list for tuned ...")
clusterVersion, _, err := exutil.GetClusterVersion(oc)
versionReg := regexp.MustCompile(`4.12|4.13`)
o.Expect(err).NotTo(o.HaveOccurred())
if versionReg.MatchString(clusterVersion) {
o.Expect(assertProcessInCgroupSchedulerBlacklist(oc, tunedNodeName, ntoNamespace, "openshift-tuned", nodeCPUCoresInt)).To(o.Equal(true))
} else {
o.Expect(assertProcessInCgroupSchedulerBlacklist(oc, tunedNodeName, ntoNamespace, "tuned", nodeCPUCoresInt)).To(o.Equal(true))
}
// The expected Cpus_allowed_list in /proc/$PID/status should be 0-N
exutil.By("Verified the cpu allow list in cgroup black list for chronyd ...")
o.Expect(assertProcessNOTInCgroupSchedulerBlacklist(oc, tunedNodeName, ntoNamespace, "chronyd", nodeCPUCoresInt)).To(o.Equal(true))
})
g.It("ROSA-OSD_CCS-NonHyperShiftHOST-Author:liqcui-Medium-27491-Add own custom profile to tuned operator [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
ntoRes := ntoResource{
name: "user-max-mnt-namespaces",
namespace: ntoNamespace,
template: customTunedProfile,
sysctlparm: "user.max_mnt_namespaces",
sysctlvalue: "142214",
}
masterNodeName := getFirstMasterNodeName(oc)
defaultMasterProfileName := getDefaultProfileNameOnMaster(oc, masterNodeName)
oc.SetupProject()
ntoTestNS := oc.Namespace()
is3CPNoWorker := exutil.Is3MasterNoDedicatedWorkerNode(oc)
//Clean up the custom profile user-max-mnt-namespaces and unlabel the nginx pod
defer ntoRes.delete(oc)
//First choice is to use the [tests] image, which is mirrored by default in disconnected clusters
//if the [tests] image is not available in some environments, we can use hello-openshift as the image
//the tests imagestream usually ships in all OCP releases and its image is mirrored in disconnected clusters by default
// AppImageName := exutil.GetImagestreamImageName(oc, "tests")
// if len(AppImageName) == 0 {
AppImageName := "quay.io/openshifttest/nginx-alpine@sha256:04f316442d48ba60e3ea0b5a67eb89b0b667abf1c198a3d0056ca748736336a0"
// }
//Create a nginx web application pod
exutil.By("Create a nginx web pod in nto temp namespace")
exutil.ApplyNsResourceFromTemplate(oc, ntoTestNS, "--ignore-unknown-parameters=true", "-f", podShippedFile, "-p", "IMAGENAME="+AppImageName)
//Check if nginx pod is ready
exutil.AssertPodToBeReady(oc, "nginx", ntoTestNS)
//Get the node name in the same node as nginx app
tunedNodeName, err := exutil.GetPodNodeName(oc, ntoTestNS, "nginx")
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("tunedNodeName is %v", tunedNodeName)
//Get the tuned pod name in the same node as nginx app
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
//Label pod nginx with tuned.openshift.io/elasticsearch=
exutil.By("Label nginx pod as tuned.openshift.io/elasticsearch=")
err = exutil.LabelPod(oc, ntoTestNS, "nginx", "tuned.openshift.io/elasticsearch=")
o.Expect(err).NotTo(o.HaveOccurred())
//Apply new profile that match label tuned.openshift.io/elasticsearch=
exutil.By("Apply new profile from CR")
ntoRes.createTunedProfileIfNotExist(oc)
exutil.By("Check if new profile user-max-mnt-namespaces applied to labeled node")
//Verify if the new profile is applied
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "user-max-mnt-namespaces")
profileCheck, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profileCheck).To(o.Equal("user-max-mnt-namespaces"))
exutil.By("Assert static tuning from profile 'user-max-mnt-namespaces' applied in tuned pod log")
assertNTOPodLogsLastLines(oc, ntoNamespace, tunedPodName, "10", 180, `static tuning from profile 'user-max-mnt-namespaces' applied|active and recommended profile \(user-max-mnt-namespaces\) match`)
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Compare if the value user.max_mnt_namespaces in on node with labeled pod, should be 142214")
compareSysctlValueOnSepcifiedNodeByName(oc, tunedNodeName, "user.max_mnt_namespaces", "", "142214")
exutil.By("Delete custom tuned profile user.max_mnt_namespaces")
ntoRes.delete(oc)
//Check if the node restores to the default profile.
isSNO := exutil.IsSNOCluster(oc)
if isSNO || is3CPNoWorker {
exutil.By("The cluster is SNO or Compact Cluster")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, defaultMasterProfileName)
exutil.By("Assert default profile applied in tuned pod log")
assertNTOPodLogsLastLines(oc, ntoNamespace, tunedPodName, "10", 180, "'"+defaultMasterProfileName+"' applied|("+defaultMasterProfileName+") match")
profileCheck, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profileCheck).To(o.Equal(defaultMasterProfileName))
} else {
exutil.By("The cluster is regular OCP Cluster")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "openshift-node")
exutil.By("Assert profile 'openshift-node' applied in tuned pod log")
assertNTOPodLogsLastLines(oc, ntoNamespace, tunedPodName, "10", 180, `static tuning from profile 'openshift-node' applied|active and recommended profile \(openshift-node\) match`)
profileCheck, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profileCheck).To(o.Equal("openshift-node"))
}
exutil.By("Check all nodes for user.max_mnt_namespaces value, all node should different from 142214")
compareSysctlDifferentFromSpecifiedValueByName(oc, "user.max_mnt_namespaces", "142214")
})
g.It("ROSA-OSD_CCS-NonHyperShiftHOST-NonPreRelease-Longduration-Author:liqcui-Medium-37125-Turning on debugging for tuned containers.[Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
ntoRes := ntoResource{
name: "user-max-net-namespaces",
namespace: ntoNamespace,
template: ntoTunedDebugFile,
sysctlparm: "user.max_net_namespaces",
sysctlvalue: "101010",
}
var (
isEnableDebug bool
isDebugInLog bool
)
//Clean up the custom profile user-max-net-namespaces
defer ntoRes.delete(oc)
//Create a temp namespace to deploy nginx pod
oc.SetupProject()
ntoTestNS := oc.Namespace()
//Prefer the [tests] imagestream image, which is mirrored by default in disconnected clusters
//If the [tests] image is not available in some environments, hello-openshift can be used instead
//The tests imagestream usually ships with all OCP releases and its image is mirrored in disconnected clusters by default
// AppImageName := exutil.GetImagestreamImageName(oc, "tests")
// if len(AppImageName) == 0 {
AppImageName := "quay.io/openshifttest/nginx-alpine@sha256:04f316442d48ba60e3ea0b5a67eb89b0b667abf1c198a3d0056ca748736336a0"
// }
//Create a nginx web application pod
exutil.By("Create a nginx web pod in nto temp namespace")
exutil.ApplyNsResourceFromTemplate(oc, ntoTestNS, "--ignore-unknown-parameters=true", "-f", podNginxFile, "-p", "IMAGENAME="+AppImageName)
//Check if nginx pod is ready
exutil.AssertPodToBeReady(oc, "nginx", ntoTestNS)
//Get the node name in the same node as nginx app
tunedNodeName, err := exutil.GetPodNodeName(oc, ntoTestNS, "nginx")
o.Expect(err).NotTo(o.HaveOccurred())
//Get the tuned pod name in the same node as nginx app
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
//To reset the tuned pod log, forcibly delete the tuned pod
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", tunedPodName, "-n", ntoNamespace, "--ignore-not-found=true").Execute()
//Label pod nginx with tuned.openshift.io/elasticsearch=
exutil.By("Label nginx pod as tuned.openshift.io/elasticsearch=")
err = exutil.LabelPod(oc, ntoTestNS, "nginx", "tuned.openshift.io/elasticsearch=")
o.Expect(err).NotTo(o.HaveOccurred())
//Verify if debug was disabled by default
exutil.By("Check node profile debug settings, it should be debug: false")
isEnableDebug = assertDebugSettings(oc, tunedNodeName, ntoNamespace, "false")
o.Expect(isEnableDebug).To(o.Equal(true))
//Apply new profile that match label tuned.openshift.io/elasticsearch=
exutil.By("Apply new profile from CR with debug setting is false")
ntoRes.createDebugTunedProfileIfNotExist(oc, false)
//Verify if the new profile is applied
ntoRes.assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "user-max-net-namespaces", "True")
profileCheck, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profileCheck).To(o.Equal("user-max-net-namespaces"))
//Verify nto tuned logs
exutil.By("Check NTO tuned pod logs to confirm if user-max-net-namespaces applied")
assertNTOPodLogsLastLines(oc, ntoNamespace, tunedPodName, "10", 180, `'user-max-net-namespaces' applied|\(user-max-net-namespaces\) match`)
//Verify if debug is false by CR setting
exutil.By("Check node profile debug settings, it should be debug: false")
isEnableDebug = assertDebugSettings(oc, tunedNodeName, ntoNamespace, "false")
o.Expect(isEnableDebug).To(o.Equal(true))
//Check if the log contains DEBUG; the expected result is that it does not
exutil.By("Check if tuned pod log contains debug key word, the expected result should be no DEBUG")
isDebugInLog = exutil.AssertOprPodLogsbyFilter(oc, tunedPodName, ntoNamespace, "DEBUG", 2)
o.Expect(isDebugInLog).To(o.Equal(false))
exutil.By("Delete custom profile and will apply a new one ...")
ntoRes.delete(oc)
exutil.By("Apply new profile from CR with debug setting is true")
ntoRes.createDebugTunedProfileIfNotExist(oc, true)
//Verify if the new profile is applied
ntoRes.assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "user-max-net-namespaces", "True")
profileCheck, err = getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profileCheck).To(o.Equal("user-max-net-namespaces"))
//Verify nto tuned logs
assertNTOPodLogsLastLines(oc, ntoNamespace, tunedPodName, "10", 180, `'user-max-net-namespaces' applied|\(user-max-net-namespaces\) match`)
//Verify if debug was enabled by CR setting
exutil.By("Check if the debug is true in node profile, the expected result should be true")
isEnableDebug = assertDebugSettings(oc, tunedNodeName, ntoNamespace, "true")
o.Expect(isEnableDebug).To(o.Equal(true))
//The log should now contain DEBUG since debug is enabled
exutil.By("Check if tuned pod log contains debug key word, the log should contain DEBUG")
exutil.AssertOprPodLogsbyFilterWithDuration(oc, tunedPodName, ntoNamespace, "DEBUG", 60, 2)
})
g.It("ROSA-OSD_CCS-NonHyperShiftHOST-Author:liqcui-Medium-37415-Allow setting isolated_cores without touching the default_irq_affinity [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
isSNO := exutil.IsSNOCluster(oc)
//Prefer to choose a worker node created by a machineset
if !isSNO {
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
} else {
tunedNodeName, err = exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
}
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "tuned.openshift.io/default-irq-smp-affinity-").Execute()
exutil.By("Label the node with default-irq-smp-affinity ")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "tuned.openshift.io/default-irq-smp-affinity=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check the default values of /proc/irq/default_smp_affinity on worker nodes")
//Use oc.AsAdmin().WithoutNamespace() instead of exutil.DebugNodeWithOptionsAndChroot, which throws a Go warning even with --quiet=true set
//This test case must get the value of default_smp_affinity without any warning output
defaultSMPAffinity, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "chroot", "/host", "cat", "/proc/irq/default_smp_affinity").Output()
e2e.Logf("the default value of /proc/irq/default_smp_affinity without cpu affinity is: %v", defaultSMPAffinity)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(defaultSMPAffinity).NotTo(o.BeEmpty())
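//Strip the comma separators so the SMP affinity mask can be compared as one contiguous hex string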
defaultSMPAffinity = strings.ReplaceAll(defaultSMPAffinity, ",", "")
defaultSMPAffinityMask := getDefaultSMPAffinityBitMaskbyCPUCores(oc, tunedNodeName)
o.Expect(defaultSMPAffinity).To(o.ContainSubstring(defaultSMPAffinityMask))
e2e.Logf("the value of /proc/irq/default_smp_affinity: %v", defaultSMPAffinityMask)
cpuBitsMask := convertCPUBitMaskToByte(defaultSMPAffinityMask)
o.Expect(cpuBitsMask).NotTo(o.BeEmpty())
ntoRes1 := ntoResource{
name: "default-irq-smp-affinity",
namespace: ntoNamespace,
template: ntoIRQSMPFile,
sysctlparm: "#default_irq_smp_affinity",
sysctlvalue: "1",
}
defer ntoRes1.delete(oc)
exutil.By("Create default-irq-smp-affinity profile to enable isolated_cores=1")
ntoRes1.createIRQSMPAffinityProfileIfNotExist(oc)
exutil.By("Check if new NTO profile was applied")
ntoRes1.assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "default-irq-smp-affinity", "True")
exutil.By("Check values of /proc/irq/default_smp_affinity on worker nodes after enabling isolated_cores=1")
isolatedcoresSMPAffinity, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "chroot", "/host", "cat", "/proc/irq/default_smp_affinity").Output()
isolatedcoresSMPAffinity = strings.ReplaceAll(isolatedcoresSMPAffinity, ",", "")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(isolatedcoresSMPAffinity).NotTo(o.BeEmpty())
e2e.Logf("the value of default_smp_affinity after setting isolated_cores=1 is: %v", isolatedcoresSMPAffinity)
exutil.By("Verify if the value of /proc/irq/default_smp_affinity is affected by isolated_cores=1")
//Isolate the second CPU core; the default_smp_affinity should change accordingly
isolatedCPU := convertIsolatedCPURange2CPUList("1")
o.Expect(isolatedCPU).NotTo(o.BeEmpty())
newSMPAffinityMask := assertIsolateCPUCoresAffectedBitMask(cpuBitsMask, isolatedCPU)
o.Expect(newSMPAffinityMask).NotTo(o.BeEmpty())
o.Expect(isolatedcoresSMPAffinity).To(o.ContainSubstring(newSMPAffinityMask))
exutil.By("Remove the old profile and create a new one later ...")
ntoRes1.delete(oc)
ntoRes2 := ntoResource{
name: "default-irq-smp-affinity",
namespace: ntoNamespace,
template: ntoIRQSMPFile,
sysctlparm: "default_irq_smp_affinity",
sysctlvalue: "1",
}
defer ntoRes2.delete(oc)
exutil.By("Create default-irq-smp-affinity profile to enable default_irq_smp_affinity=1")
ntoRes2.createIRQSMPAffinityProfileIfNotExist(oc)
exutil.By("Check if new NTO profile was applied")
ntoRes2.assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "default-irq-smp-affinity", "True")
exutil.By("Check values of /proc/irq/default_smp_affinity on worker nodes")
//We only need the value of /proc/irq/default_smp_affinity without stderr
IRQSMPAffinity, _, err := exutil.DebugNodeRetryWithOptionsAndChrootWithStdErr(oc, tunedNodeName, []string{"--quiet=true", "--to-namespace=" + ntoNamespace}, "cat", "/proc/irq/default_smp_affinity")
IRQSMPAffinity = strings.ReplaceAll(IRQSMPAffinity, ",", "")
o.Expect(IRQSMPAffinity).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
//Isolate the second CPU core; the default_smp_affinity should change accordingly
e2e.Logf("the value of default_smp_affinity after setting default_irq_smp_affinity=1 is: %v", IRQSMPAffinity)
isMatch := assertDefaultIRQSMPAffinityAffectedBitMask(cpuBitsMask, isolatedCPU, string(IRQSMPAffinity))
o.Expect(isMatch).To(o.Equal(true))
})
g.It("ROSA-OSD_CCS-NonHyperShiftHOST-Author:liqcui-Medium-44650-NTO profiles provided with TuneD [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
//Get the tuned pod name that run on first worker node
tunedNodeName, err := exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(err).NotTo(o.HaveOccurred())
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
exutil.By("Check kernel version of worker nodes ...")
kernelVersion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.status.nodeInfo.kernelVersion}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(kernelVersion).NotTo(o.BeEmpty())
exutil.By("Check default tuned profile list, should contain openshift-control-plane and openshift-node")
defaultTunedOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "tuned", "default", "-ojsonpath={.spec.recommend}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(defaultTunedOutput).NotTo(o.BeEmpty())
o.Expect(defaultTunedOutput).To(o.And(
o.ContainSubstring("openshift-control-plane"),
o.ContainSubstring("openshift-node")))
exutil.By("Check content of tuned file /usr/lib/tuned/openshift/tuned.conf to match default NTO settings")
openshiftTunedConf, err := exutil.RemoteShPod(oc, ntoNamespace, tunedPodName, "cat", "/usr/lib/tuned/openshift/tuned.conf")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(openshiftTunedConf).NotTo(o.BeEmpty())
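//The expected content of the default openshift profile differs slightly between el7/el8 kernels and newer kernel versions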
if strings.Contains(kernelVersion, "el8") || strings.Contains(kernelVersion, "el7") {
o.Expect(openshiftTunedConf).To(o.And(
o.ContainSubstring("avc_cache_threshold=8192"),
o.ContainSubstring("kernel.pid_max=>4194304"),
o.ContainSubstring("net.netfilter.nf_conntrack_max=1048576"),
o.ContainSubstring("net.ipv4.conf.all.arp_announce=2"),
o.ContainSubstring("net.ipv4.neigh.default.gc_thresh1=8192"),
o.ContainSubstring("net.ipv4.neigh.default.gc_thresh2=32768"),
o.ContainSubstring("net.ipv4.neigh.default.gc_thresh3=65536"),
o.ContainSubstring("net.ipv6.neigh.default.gc_thresh1=8192"),
o.ContainSubstring("net.ipv6.neigh.default.gc_thresh2=32768"),
o.ContainSubstring("net.ipv6.neigh.default.gc_thresh3=65536"),
o.ContainSubstring("vm.max_map_count=262144"),
o.ContainSubstring("/sys/module/nvme_core/parameters/io_timeout=4294967295"),
o.ContainSubstring(`cgroup_ps_blacklist=/kubepods\.slice/`),
o.ContainSubstring("runtime=0")))
} else {
o.Expect(openshiftTunedConf).To(o.And(
o.ContainSubstring("avc_cache_threshold=8192"),
o.ContainSubstring("nf_conntrack_hashsize=1048576"),
o.ContainSubstring("kernel.pid_max=>4194304"),
o.ContainSubstring("fs.aio-max-nr=>1048576"),
o.ContainSubstring("net.netfilter.nf_conntrack_max=1048576"),
o.ContainSubstring("net.ipv4.conf.all.arp_announce=2"),
o.ContainSubstring("net.ipv4.neigh.default.gc_thresh1=8192"),
o.ContainSubstring("net.ipv4.neigh.default.gc_thresh2=32768"),
o.ContainSubstring("net.ipv4.neigh.default.gc_thresh3=65536"),
o.ContainSubstring("net.ipv6.neigh.default.gc_thresh1=8192"),
o.ContainSubstring("net.ipv6.neigh.default.gc_thresh2=32768"),
o.ContainSubstring("net.ipv6.neigh.default.gc_thresh3=65536"),
o.ContainSubstring("vm.max_map_count=262144"),
o.ContainSubstring("/sys/module/nvme_core/parameters/io_timeout=4294967295"),
o.ContainSubstring(`cgroup_ps_blacklist=/kubepods\.slice/`),
o.ContainSubstring("runtime=0")))
}
exutil.By("Check content of tuned file /usr/lib/tuned/openshift-control-plane/tuned.conf to match default NTO settings")
openshiftControlPlaneTunedConf, err := exutil.RemoteShPod(oc, ntoNamespace, tunedPodName, "cat", "/usr/lib/tuned/openshift-control-plane/tuned.conf")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(openshiftControlPlaneTunedConf).NotTo(o.BeEmpty())
o.Expect(openshiftControlPlaneTunedConf).To(o.ContainSubstring("include=openshift"))
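//The sched_* settings are only expected in openshift-control-plane/tuned.conf on el7/el8 kernels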
if strings.Contains(kernelVersion, "el8") || strings.Contains(kernelVersion, "el7") {
o.Expect(openshiftControlPlaneTunedConf).To(o.And(
o.ContainSubstring("sched_wakeup_granularity_ns=4000000"),
o.ContainSubstring("sched_migration_cost_ns=5000000")))
} else {
o.Expect(openshiftControlPlaneTunedConf).NotTo(o.And(
o.ContainSubstring("sched_wakeup_granularity_ns=4000000"),
o.ContainSubstring("sched_migration_cost_ns=5000000")))
}
exutil.By("Check content of tuned file /usr/lib/tuned/openshift-node/tuned.conf to match default NTO settings")
openshiftNodeTunedConf, err := exutil.RemoteShPod(oc, ntoNamespace, tunedPodName, "cat", "/usr/lib/tuned/openshift-node/tuned.conf")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(openshiftNodeTunedConf).To(o.And(
o.ContainSubstring("include=openshift"),
o.ContainSubstring("net.ipv4.tcp_fastopen=3"),
o.ContainSubstring("fs.inotify.max_user_watches=65536"),
o.ContainSubstring("fs.inotify.max_user_instances=8192")))
})
g.It("ROSA-OSD_CCS-NonHyperShiftHOST-Author:liqcui-Medium-33238-Test NTO support for operatorapi Removed state [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
exutil.By("Remove custom profile (if not already removed) and patch default tuned back to Managed")
//Cleanup tuned and change back to managed state
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", ntoNamespace, "tuned", "tuning-pidmax", "--ignore-not-found").Execute()
defer patchTunedState(oc, ntoNamespace, "default", "Managed")
ntoRes := ntoResource{
name: "tuning-pidmax",
namespace: ntoNamespace,
template: customTunedProfile,
sysctlparm: "kernel.pid_max",
sysctlvalue: "182218",
}
oc.SetupProject()
ntoTestNS := oc.Namespace()
//Clean up the custom profile tuning-pidmax and unlabel the nginx pod
defer ntoRes.delete(oc)
//Prefer the [tests] imagestream image, which is mirrored by default in disconnected clusters
//If the [tests] image is not available in some environments, hello-openshift can be used instead
//The tests imagestream usually ships with all OCP releases and its image is mirrored in disconnected clusters by default
// AppImageName := exutil.GetImagestreamImageName(oc, "tests")
// if len(AppImageName) == 0 {
AppImageName := "quay.io/openshifttest/nginx-alpine@sha256:04f316442d48ba60e3ea0b5a67eb89b0b667abf1c198a3d0056ca748736336a0"
// }
//Create a nginx web application pod
exutil.By("Create a nginx web pod in nto temp namespace")
exutil.ApplyNsResourceFromTemplate(oc, ntoTestNS, "--ignore-unknown-parameters=true", "-f", podNginxFile, "-p", "IMAGENAME="+AppImageName)
//Check if nginx pod is ready
exutil.AssertPodToBeReady(oc, "nginx", ntoTestNS)
//Get the node name in the same node as nginx app
tunedNodeName, err := exutil.GetPodNodeName(oc, ntoTestNS, "nginx")
o.Expect(err).NotTo(o.HaveOccurred())
//Get the tuned pod name in the same node as nginx app
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
e2e.Logf("the tuned name on node %v is %v", tunedNodeName, tunedPodName)
//Label pod nginx with tuned.openshift.io/elasticsearch=
exutil.By("Label nginx pod as tuned.openshift.io/elasticsearch=")
err = exutil.LabelPod(oc, ntoTestNS, "nginx", "tuned.openshift.io/elasticsearch=")
o.Expect(err).NotTo(o.HaveOccurred())
//Apply new profile that match label tuned.openshift.io/elasticsearch=
exutil.By("Apply new profile from CR")
ntoRes.createTunedProfileIfNotExist(oc)
//Verify if the new profile is applied
ntoRes.assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "tuning-pidmax", "True")
profileCheck, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profileCheck).To(o.Equal("tuning-pidmax"))
exutil.By("Check logs, profile changes SHOULD be applied since tuned is MANAGED")
logsCheck, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", ntoNamespace, "--tail=9", tunedPodName).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(logsCheck).To(o.ContainSubstring("tuning-pidmax"))
exutil.By("Compare if the value user.max_ipc_namespaces in on node with labeled pod, should be 182218")
compareSysctlValueOnSepcifiedNodeByName(oc, tunedNodeName, "kernel.pid_max", "", "182218")
exutil.By("Patch default tuned to 'Removed'")
err = patchTunedState(oc, ntoNamespace, "default", "Removed")
o.Expect(err).NotTo(o.HaveOccurred())
state, err := getTunedState(oc, ntoNamespace, "default")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(state).To(o.Equal("Removed"))
exutil.By("Check logs, profiles, and nodes (profile changes SHOULD NOT be applied since tuned is REMOVED)")
exutil.By("Check pod status, all tuned pod should be terminated since tuned is REMOVED")
exutil.WaitForNoPodsAvailableByKind(oc, "daemonset", "tuned", ntoNamespace)
podCheck, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "pods").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(podCheck).NotTo(o.ContainSubstring("tuned"))
exutil.By("Check profile status, all node profile should be removed since tuned is REMOVED)")
profileCheck, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profileCheck).To(o.ContainSubstring("No resources"))
exutil.By("Change tuned state back to managed ...")
err = patchTunedState(oc, ntoNamespace, "default", "Managed")
o.Expect(err).NotTo(o.HaveOccurred())
state, err = getTunedState(oc, ntoNamespace, "default")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(state).To(o.Equal("Managed"))
exutil.By("Get the tuned node and pod names")
//Get the node name in the same node as nginx app
tunedNodeName, err = exutil.GetPodNodeName(oc, ntoTestNS, "nginx")
o.Expect(err).NotTo(o.HaveOccurred())
//Get the tuned pod name in the same node as nginx app
tunedPodName = getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
exutil.By("Check logs, profiles, and nodes (profile changes SHOULD be applied since tuned is MANAGED)")
//Verify if the new profile is applied
ntoRes.assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "tuning-pidmax", "True")
profileCheck, err = getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profileCheck).To(o.Equal("tuning-pidmax"))
exutil.By("Check logs, profile changes SHOULD be applied since tuned is MANAGED)")
logsCheck, err = oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", ntoNamespace, "--tail=9", tunedPodName).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(logsCheck).To(o.ContainSubstring("tuning-pidmax"))
exutil.By("Compare if the value user.max_ipc_namespaces in on node with labeled pod, should be 182218")
compareSysctlValueOnSepcifiedNodeByName(oc, tunedNodeName, "kernel.pid_max", "", "182218")
})
g.It("Longduration-NonPreRelease-Author:liqcui-Medium-30589-NTO Use MachineConfigs to lay down files needed for tuned [Disruptive] [Slow]", func() {
// test requires NTO to be installed
isSNO := exutil.IsSNOCluster(oc)
if !isNTO || isSNO {
g.Skip("NTO is not installed or is Single Node Cluster- skipping test ...")
}
//Prefer to choose a worker node created by a machineset
if !isSNO {
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
} else {
tunedNodeName, err = exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
}
//Re-delete the MCP, MC, and performance profile, and unlabel the node, in case the test case broke before the cleanup steps
defer exutil.DeleteMCAndMCPByName(oc, "50-nto-worker-rt", "worker-rt", 300)
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-rt-").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "openshift-realtime", "-n", ntoNamespace, "--ignore-not-found").Execute()
exutil.By("Create machine config pool")
exutil.ApplyClusterResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", ntoMCPFile, "-p", "MCP_NAME=worker-rt")
exutil.By("Label the node with node-role.kubernetes.io/worker-rt=")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-rt=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create openshift-realtime profile")
//ocpArch, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.status.nodeInfo.architecture}").Output()
// o.Expect(err).NotTo(o.HaveOccurred())
// if (iaasPlatform == "aws" || iaasPlatform == "gcp") && ocpArch == "amd64" {
exutil.ApplyNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", ntoRealtimeFile, "-p", "INCLUDE=openshift-node,realtime")
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Assert if machine config pool applied for worker nodes")
exutil.AssertIfMCPChangesAppliedByName(oc, "worker", 300)
exutil.AssertIfMCPChangesAppliedByName(oc, "worker-rt", 300)
exutil.By("Assert if openshift-realtime profile was applied ...")
//Verify if the new profile is applied
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "openshift-realtime")
profileCheck, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profileCheck).To(o.Equal("openshift-realtime"))
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Assert if isolcpus was applied in machineconfig...")
AssertTunedAppliedMC(oc, "nto-worker-rt", "isolcpus=")
exutil.By("Assert if isolcpus was applied in labled node...")
isMatch := AssertTunedAppliedToNode(oc, tunedNodeName, "isolcpus=")
o.Expect(isMatch).To(o.Equal(true))
exutil.By("Delete openshift-realtime tuned in labled node...")
oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "openshift-realtime", "-n", ntoNamespace, "--ignore-not-found").Execute()
exutil.By("Check Nodes for expected changes")
assertIfNodeSchedulingDisabled(oc)
exutil.By("Assert if machine config pool applied for worker nodes")
exutil.AssertIfMCPChangesAppliedByName(oc, "worker-rt", 300)
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Assert if isolcpus was applied in labled node...")
isMatch = AssertTunedAppliedToNode(oc, tunedNodeName, "isolcpus=")
o.Expect(isMatch).To(o.Equal(false))
//The custom MC and MCP must be deleted in the correct sequence: unlabel the node first so it returns to the worker MCP, then delete the MC and MCP
//otherwise the MCP will stay in a degraded state and affect other test cases that use the MCP
exutil.By("Delete the custom MC and MCP following the correct sequence...")
oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-rt-").Execute()
exutil.DeleteMCAndMCPByName(oc, "50-nto-worker-rt", "worker-rt", 300)
})
g.It("ROSA-OSD_CCS-NonHyperShiftHOST-Author:liqcui-Medium-29804-Tuned profile is updated after incorrect tuned CR is fixed [Disruptive]", func() {
// test requires NTO to be installed
isSNO := exutil.IsSNOCluster(oc)
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
is3Master := exutil.Is3MasterNoDedicatedWorkerNode(oc)
var (
tunedNodeName string
err error
)
//Choose a worker node as the labeled node
//Also support clusters with 3 combined master/worker nodes and no dedicated worker nodes
if !is3Master && !isSNO {
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
} else {
tunedNodeName, err = exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
}
e2e.Logf("tunedNodeName is:\n%v", tunedNodeName)
//Get the tuned pod name in the same node that labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "tuned-").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "ips", "-n", ntoNamespace, "--ignore-not-found").Execute()
exutil.By("Label the node with tuned=ips")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "tuned=ips", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create ips-host profile, new tuned should automatically handle duplicate sysctl settings")
//Define duplicated parameter and value
exutil.ApplyNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", IPSFile, "-p", "SYSCTLPARM1=kernel.pid_max", "SYSCTLVALUE1=1048575", "SYSCTLPARM2=kernel.pid_max", "SYSCTLVALUE2=1048575")
exutil.By("Assert recommended profile (ips-host) matches current configuration in tuned pod log")
assertNTOPodLogsLastLines(oc, ntoNamespace, tunedPodName, "15", 180, `recommended profile \(ips-host\) matches current configuration|\(ips-host\) match|'ips-host' applied`)
exutil.By("Check if new custom profile applied to label node")
o.Expect(assertNTOCustomProfileStatus(oc, ntoNamespace, tunedNodeName, "ips-host", "True", "False")).To(o.Equal(true))
//Only used for debug info
exutil.By("Check current profile for each node")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
e2e.Logf("Current profile for each node: \n%v", output)
//Newer TuneD automatically de-duplicates sysctl values, so there is no duplicate error anymore
exutil.By("Assert if the duplicated sysctl kernel.pid_max takes effect on the target node, expected value should be 1048575")
compareSpecifiedValueByNameOnLabelNode(oc, tunedNodeName, "kernel.pid_max", "1048575")
exutil.By("Get default value of fs.mount-max on label node")
defaultMaxMapCount := getValueOfSysctlByName(oc, ntoNamespace, tunedNodeName, "fs.mount-max")
o.Expect(defaultMaxMapCount).NotTo(o.BeEmpty())
e2e.Logf("The default value of sysctl fs.mount-max is %v", defaultMaxMapCount)
//setting an invalid value for ips-host profile
exutil.By("Update ips-host profile with invalid value of fs.mount-max = -1")
exutil.ApplyNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", IPSFile, "-p", "SYSCTLPARM1=fs.mount-max", "SYSCTLVALUE1=-1", "SYSCTLPARM2=kernel.pid_max", "SYSCTLVALUE2=1048575")
exutil.By("Assert static tuning from profile 'ips-host' applied in tuned pod log")
assertNTOPodLogsLastLines(oc, ntoNamespace, tunedPodName, "20", 180, `'ips-host' applied|recommended profile \(ips-host\) matches current configuration`)
exutil.By("Check if new custom profile applied to label node")
o.Expect(assertNTOCustomProfileStatus(oc, ntoNamespace, tunedNodeName, "ips-host", "True", "True")).To(o.Equal(true))
exutil.By("Check current profile for each node")
output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
e2e.Logf("Current profile for each node: \n%v", output)
//The invalid value won't impact default value of fs.mount-max
exutil.By("Assert if the value of sysctl fs.mount-max still use default value")
compareSpecifiedValueByNameOnLabelNode(oc, tunedNodeName, "fs.mount-max", defaultMaxMapCount)
//Setting a new value of fs.mount-max for the ips-host profile
exutil.By("Update ips-host profile with new value of fs.mount-max = 868686")
exutil.ApplyNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", IPSFile, "-p", "SYSCTLPARM1=fs.mount-max", "SYSCTLVALUE1=868686", "SYSCTLPARM2=kernel.pid_max", "SYSCTLVALUE2=1048575")
exutil.By("Assert recommended profile (ips-host) matches current configuration in tuned pod log")
assertNTOPodLogsLastLines(oc, ntoNamespace, tunedPodName, "15", 180, `recommended profile \(ips-host\) matches current configuration|\(ips-host\) match|'ips-host' applied`)
exutil.By("Check if new custom profile applied to label node")
o.Expect(assertNTOCustomProfileStatus(oc, ntoNamespace, tunedNodeName, "ips-host", "True", "False")).To(o.Equal(true))
exutil.By("Check current profile for each node")
output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
e2e.Logf("Current profile for each node: \n%v", output)
//The new value of fs.mount-max should now take effect
exutil.By("Assert if the new value of sysctl fs.mount-max takes effect, expected value is 868686")
compareSpecifiedValueByNameOnLabelNode(oc, tunedNodeName, "fs.mount-max", "868686")
})
g.It("Longduration-NonPreRelease-Author:liqcui-Medium-39123-NTO Operator will update tuned after changing included profile [Disruptive] [Slow]", func() {
// test requires NTO to be installed
isSNO := exutil.IsSNOCluster(oc)
if !isNTO || isSNO {
g.Skip("NTO is not installed or is Single Node Cluster- skipping test ...")
}
if ManualPickup {
g.Skip("This is the test case that execute mannually in shared cluster ...")
}
skipPAODeploy := skipDeployPAO(oc)
isPAOInstalled = exutil.IsPAOInstalled(oc)
if skipPAODeploy || isPAOInstalled {
e2e.Logf("PAO has been installed and continue to execute test case")
} else {
isPAOInOperatorHub := exutil.IsPAOInOperatorHub(oc)
if !isPAOInOperatorHub {
g.Skip("PAO is not in OperatorHub - skipping test ...")
}
exutil.InstallPAO(oc, paoNamespace)
}
//Re-delete the MCP, MC, and performance profile, and unlabel the node, in case the test case broke before the cleanup steps
defer exutil.DeleteMCAndMCPByName(oc, "50-nto-worker-cnf", "worker-cnf", 300)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "performance-patch", "-n", ntoNamespace, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("PerformanceProfile", "performance", "--ignore-not-found").Execute()
//Prefer to choose a worker node created by a machineset
if !isSNO {
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
} else {
tunedNodeName, err = exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
}
//Get the tuned pod name in the same node that labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-cnf-").Execute()
exutil.By("Label the node with node-role.kubernetes.io/worker-cnf=")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-cnf=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// currently test is only supported on AWS, GCP, and Azure
// if iaasPlatform == "aws" || iaasPlatform == "gcp" {
ocpArch, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.status.nodeInfo.architecture}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
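//Only AWS and GCP amd64 nodes enable the real-time kernel in the performance profile; other platforms apply the profile with ISENABLED=false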
if (iaasPlatform == "aws" || iaasPlatform == "gcp") && ocpArch == "amd64" {
//Only GCP and AWS support the realtime kernel
exutil.By("Apply performance profile")
exutil.ApplyClusterResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", paoPerformanceFile, "-p", "ISENABLED=true")
} else {
exutil.By("Apply performance profile")
exutil.ApplyClusterResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", paoPerformanceFile, "-p", "ISENABLED=false")
}
exutil.By("Apply worker-cnf machineconfigpool")
exutil.ApplyOperatorResourceByYaml(oc, paoNamespace, paoWorkerCnfMCPFile)
exutil.By("Assert if the MCP worker-cnf has been successfully applied ...")
exutil.AssertIfMCPChangesAppliedByName(oc, "worker-cnf", 900)
exutil.By("Check if new NTO profile openshift-node-performance-performance was applied")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "openshift-node-performance-performance")
exutil.By("Check if profile openshift-node-performance-performance applied on nodes")
nodeProfileName, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeProfileName).To(o.ContainSubstring("openshift-node-performance-performance"))
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check if tuned pod logs contains openshift-node-performance-performance on labeled nodes")
assertNTOPodLogsLastLines(oc, ntoNamespace, tunedPodName, "20", 60, "openshift-node-performance-performance")
exutil.By("Check if the linux kernel parameter as vm.stat_interval = 10")
compareSpecifiedValueByNameOnLabelNode(oc, tunedNodeName, "vm.stat_interval", "10")
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Apply performance-patch profile")
exutil.ApplyOperatorResourceByYaml(oc, ntoNamespace, paoPerformancePatchFile)
exutil.By("Assert if the MCP worker-cnf is ready after node rebooted ...")
exutil.AssertIfMCPChangesAppliedByName(oc, "worker-cnf", 750)
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check if profile what's active profile applied on nodes")
nodeProfileName, err = getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeProfileName).To(o.ContainSubstring("openshift-node-performance-performance"))
exutil.By("Check if tuned pod logs contains Cannot find profile 'openshift-node-performance-example-performanceprofile' on labeled nodes")
assertNTOPodLogsLastLines(oc, ntoNamespace, tunedPodName, "30", 60, "Cannot find profile")
exutil.By("Check if the linux kernel parameter as vm.stat_interval = 1")
compareSpecifiedValueByNameOnLabelNode(oc, tunedNodeName, "vm.stat_interval", "1")
exutil.By("Patch include to include=openshift-node-performance-performance")
err = patchTunedProfile(oc, ntoNamespace, "performance-patch", paoPerformanceFixpatchFile)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Assert if the MCP worker-cnf is ready after node rebooted ...")
exutil.AssertIfMCPChangesAppliedByName(oc, "worker-cnf", 600)
exutil.By("Check if new NTO profile performance-patch was applied")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "performance-patch")
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check if contains static tuning from profile 'performance-patch' applied in tuned pod logs on labeled nodes")
assertNTOPodLogsLastLines(oc, ntoNamespace, tunedPodName, "30", 60, `static tuning from profile 'performance-patch' applied|recommended profile \(performance-patch\) matches current configuration`)
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check if the linux kernel parameter as vm.stat_interval = 10")
compareSpecifiedValueByNameOnLabelNode(oc, tunedNodeName, "vm.stat_interval", "10")
//The custom MC and MCP must be deleted in the correct sequence: unlabel the node first so it returns to the worker MCP, then delete the MC and MCP
//otherwise the MCP will stay in a degraded state and affect other test cases that use the MCP
exutil.By("Delete the custom MC and MCP following the correct sequence...")
oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-cnf-").Execute()
exutil.DeleteMCAndMCPByName(oc, "50-nto-worker-cnf", "worker-cnf", 480)
})
g.It("Longduration-NonPreRelease-Author:liqcui-Medium-45686-NTO Creating tuned profile with references to not yet existing Performance Profile configuration.[Disruptive] [Slow]", func() {
// test requires NTO to be installed
isSNO := exutil.IsSNOCluster(oc)
if !isNTO || isSNO {
g.Skip("NTO is not installed or is Single Node Cluster- skipping test ...")
}
if ManualPickup {
g.Skip("This is the test case that execute mannually in shared cluster ...")
}
skipPAODeploy := skipDeployPAO(oc)
isPAOInstalled = exutil.IsPAOInstalled(oc)
if skipPAODeploy || isPAOInstalled {
e2e.Logf("PAO has been installed and continue to execute test case")
} else {
isPAOInOperatorHub := exutil.IsPAOInOperatorHub(oc)
if !isPAOInOperatorHub {
g.Skip("PAO is not in OperatorHub - skipping test ...")
}
exutil.InstallPAO(oc, paoNamespace)
}
defer exutil.DeleteMCAndMCPByName(oc, "50-nto-worker-optimize", "worker-optimize", 360)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "include-performance-profile", "-n", ntoNamespace, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("PerformanceProfile", "optimize", "--ignore-not-found").Execute()
//Use the last worker node as labeled node
tunedNodeName, err := exutil.GetLastLinuxWorkerNode(oc)
o.Expect(err).NotTo(o.HaveOccurred())
//Get the tuned pod name in the labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
//Re-delete the MCP, MC, and performance profile, and unlabel the node, in case the test case broke before the cleanup steps
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-optimize-").Execute()
exutil.By("Label the node with node-role.kubernetes.io/worker-optimize=")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-optimize=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Apply worker-optimize machineconfigpool")
exutil.ApplyOperatorResourceByYaml(oc, paoNamespace, paoWorkerOptimizeMCPFile)
exutil.By("Assert if the MCP has been successfully applied ...")
exutil.AssertIfMCPChangesAppliedByName(oc, "worker-optimize", 600)
isSNO = exutil.IsSNOCluster(oc)
if isSNO {
exutil.By("Apply include-performance-profile tuned profile")
exutil.ApplyNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", paoIncludePerformanceProfile, "-p", "ROLENAME=master")
exutil.By("Assert if the mcp is ready after server has been successfully rebooted...")
exutil.AssertIfMCPChangesAppliedByName(oc, "master", 600)
} else {
exutil.By("Apply include-performance-profile tuned profile")
exutil.ApplyNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", paoIncludePerformanceProfile, "-p", "ROLENAME=worker-optimize")
exutil.By("Assert if the mcp is ready after server has been successfully rebooted...")
exutil.AssertIfMCPChangesAppliedByName(oc, "worker-optimize", 600)
}
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check if profile what's active profile applied on nodes")
nodeProfileName, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
if isSNO {
o.Expect(nodeProfileName).To(o.ContainSubstring("openshift-control-plane"))
} else {
o.Expect(nodeProfileName).To(o.ContainSubstring("openshift-node"))
}
exutil.By("Check if tuned pod logs contains Cannot find profile 'openshift-node-performance-optimize' on labeled nodes")
assertNTOPodLogsLastLines(oc, ntoNamespace, tunedPodName, "10", 60, "Cannot find profile 'openshift-node-performance-optimize'")
if isSNO {
exutil.By("Apply performance optimize profile")
exutil.ApplyClusterResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", paoPerformanceOptimizeFile, "-p", "ROLENAME=master")
exutil.By("Assert if the mcp is ready after server has been successfully rebooted...")
exutil.AssertIfMCPChangesAppliedByName(oc, "master", 600)
} else {
exutil.By("Apply performance optimize profile")
exutil.ApplyClusterResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", paoPerformanceOptimizeFile, "-p", "ROLENAME=worker-optimize")
exutil.By("Assert if the mcp is ready after server has been successfully rebooted...")
exutil.AssertIfMCPChangesAppliedByName(oc, "worker-optimize", 600)
}
exutil.By("Check performance profile tuned profile should be automatically created")
tunedNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "tuned").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNames).To(o.ContainSubstring("openshift-node-performance-optimize"))
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check if new NTO profile performance-patch was applied")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "include-performance-profile")
exutil.By("Check if profile what's active profile applied on nodes")
nodeProfileName, err = getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeProfileName).To(o.ContainSubstring("include-performance-profile"))
exutil.By("Check if contains static tuning from profile 'include-performance-profile' applied in tuned pod logs on labeled nodes")
assertNTOPodLogsLastLines(oc, ntoNamespace, tunedPodName, "20", 60, `static tuning from profile 'include-performance-profile' applied|recommended profile \(include-performance-profile\) matches current configuration`)
//The custom MC and MCP must be deleted in the correct sequence: unlabel the node first so it returns to the worker MCP, then delete the MC and MCP
//otherwise the MCP will stay in a degraded state and affect other test cases that use the MCP
exutil.By("Delete the custom MC and MCP following the correct sequence...")
oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-optimize-").Execute()
exutil.DeleteMCAndMCPByName(oc, "50-nto-worker-optimize", "worker-optimize", 480)
})
g.It("NonHyperShiftHOST-Author:liqcui-Medium-36152-NTO Get metrics and alerts", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
//Get metric information that requires SSL client authentication
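//The client TLS key and certificate below are mounted in the prometheus-k8s pod and are used to authenticate against the NTO metrics endpoint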
sslKey := "/etc/prometheus/secrets/metrics-client-certs/tls.key"
sslCrt := "/etc/prometheus/secrets/metrics-client-certs/tls.crt"
//Get NTO metrics data
exutil.By("Get NTO metrics informaton without ssl, should be denied access, throw error...")
metricsOutput, metricsError := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", "openshift-monitoring", "sts/prometheus-k8s", "-c", "prometheus", "--", "curl", "-k", "https://node-tuning-operator.openshift-cluster-node-tuning-operator.svc:60000/metrics").Output()
o.Expect(metricsError).Should(o.HaveOccurred())
o.Expect(metricsOutput).NotTo(o.BeEmpty())
o.Expect(metricsOutput).To(o.Or(
o.ContainSubstring("bad certificate"),
o.ContainSubstring("errno = 104"),
o.ContainSubstring("certificate required"),
o.ContainSubstring("error:1409445C"),
o.ContainSubstring("exit code 56"),
o.ContainSubstring("errno = 32")))
exutil.By("Get NTO metrics informaton with ssl key and crt, should be access, get the metric information...")
metricsOutput, metricsError = oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", "openshift-monitoring", "sts/prometheus-k8s", "-c", "prometheus", "--", "curl", "-k", "--key", sslKey, "--cert", sslCrt, "https://node-tuning-operator.openshift-cluster-node-tuning-operator.svc:60000/metrics").Output()
o.Expect(metricsOutput).NotTo(o.BeEmpty())
o.Expect(metricsError).NotTo(o.HaveOccurred())
e2e.Logf("The metrics information of NTO as below: \n%v", metricsOutput)
//Assert the key metrics
exutil.By("Check if all metrics exist as expected...")
o.Expect(metricsOutput).To(o.And(
o.ContainSubstring("nto_build_info"),
o.ContainSubstring("nto_pod_labels_used_info"),
o.ContainSubstring("nto_degraded_info"),
o.ContainSubstring("nto_profile_calculated_total")))
})
g.It("NonPreRelease-Longduration-Author:liqcui-Medium-49265-NTO support automatically rotate ssl certificate. [Disruptive]", func() {
// test requires NTO to be installed
is3CPNoWorker := exutil.Is3MasterNoDedicatedWorkerNode(oc)
isSNO := exutil.IsSNOCluster(oc)
if !isNTO || is3CPNoWorker || isSNO {
g.Skip("NTO is not installed or No need to test on compact cluster - skipping test ...")
}
//Use the last worker node as labeled node
tunedNodeName, err = exutil.GetLastLinuxWorkerNode(oc)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The tuned node name is: \n%v", tunedNodeName)
//Get NTO operator pod name
ntoOperatorPod, err := getNTOPodName(oc, ntoNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The tuned operator pod name is: \n%v", ntoOperatorPod)
metricEndpoint := getServiceENDPoint(oc, ntoNamespace)
exutil.By("Get information about the certificate the metrics server in NTO")
openSSLOutputBefore, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "/bin/bash", "-c", "/bin/openssl s_client -connect "+metricEndpoint+" 2>/dev/null </dev/null").Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Get information about the creation and expiration date of the certificate")
openSSLExpireDateBefore, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "/bin/bash", "-c", "/bin/openssl s_client -connect "+metricEndpoint+" 2>/dev/null </dev/null | /bin/openssl x509 -noout -dates").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The openSSL Expired Date information of NTO openSSL before rotate as below: \n%v", openSSLExpireDateBefore)
encodeBase64OpenSSLOutputBefore := exutil.StringToBASE64(openSSLOutputBefore)
encodeBase64OpenSSLExpireDateBefore := exutil.StringToBASE64(openSSLExpireDateBefore)
//To improve the success rate, execute oc delete secret/node-tuning-operator-tls instead of deleting secret/signing-key in openshift-service-ca
//The latter takes more time to complete and has to be executed manually again if it fails
exutil.By("Delete secret/node-tuning-operator-tls to automate to create a new one certificate")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", ntoNamespace, "secret/node-tuning-operator-tls").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Assert NTO logs to match key words restarting metrics server to rotate certificates")
assertNTOPodLogsLastLines(oc, ntoNamespace, ntoOperatorPod, "4", 240, "restarting metrics server to rotate certificates")
exutil.By("Assert if NTO rotate certificates ...")
AssertNTOCertificateRotate(oc, ntoNamespace, tunedNodeName, encodeBase64OpenSSLOutputBefore, encodeBase64OpenSSLExpireDateBefore)
exutil.By("The certificate extracted from the openssl command should match the first certificate from the tls.crt file in the secret")
compareCertificateBetweenOpenSSLandTLSSecret(oc, ntoNamespace, tunedNodeName)
})
g.It("Longduration-NonPreRelease-Author:liqcui-Medium-49371-NTO will not restart tuned daemon when profile application take too long [Disruptive] [Slow]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
//The TuneD restart on timeout has been removed due to the bug https://issues.redhat.com/browse/OCPBUGS-30647
//Use the first worker node as labeled node
tunedNodeName, err := exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(err).NotTo(o.HaveOccurred())
//Get the tuned pod name in the same node that labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "worker-stuck-").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "openshift-profile-stuck", "-n", ntoNamespace, "--ignore-not-found").Execute()
exutil.By("Label the node with worker-stack=")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "worker-stuck=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create openshift-profile-stuck profile")
exutil.ApplyOperatorResourceByYaml(oc, ntoNamespace, workerStackFile)
exutil.By("Check openshift-profile-stuck tuned profile should be automatically created")
tunedNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "tuned").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNames).To(o.ContainSubstring("openshift-profile-stuck"))
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Assert recommended profile (openshift-profile-stuck) matches current configuration in tuned pod log")
assertNTOPodLogsLastLines(oc, ntoNamespace, tunedPodName, "12", 300, `recommended profile \(openshift-profile-stuck\) matches current configuration|'openshift-profile-stuck' applied`)
exutil.By("Check if new NTO profile openshift-profile-stuck was applied")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "openshift-profile-stuck")
exutil.By("Check if profile what's active profile applied on nodes")
nodeProfileName, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeProfileName).To(o.ContainSubstring("openshift-profile-stuck"))
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("The log shouldn't contain [ timeout (120) to apply TuneD profile; restarting TuneD daemon ] in tuned pod log")
ntoPodLogs, _ := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", ntoNamespace, tunedPodName, "--tail=10").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(ntoPodLogs).NotTo(o.ContainSubstring("timeout (120) to apply TuneD profile; restarting TuneD daemon"))
exutil.By("The log shouldn't contain [ error waiting for tuned: signal: terminated ] in tuned pod log")
ntoPodLogs, _ = oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", ntoNamespace, tunedPodName, "--tail=10").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(ntoPodLogs).NotTo(o.ContainSubstring("error waiting for tuned: signal: terminated"))
})
g.It("Longduration-NonPreRelease-Author:liqcui-Medium-49370-NTO add huge pages to boot time via bootloader [Disruptive] [Slow]", func() {
// test requires NTO to be installed
isSNO := exutil.IsSNOCluster(oc)
if !isNTO || isSNO {
g.Skip("NTO is not installed or it's Single Node Cluster- skipping test ...")
}
//Use the last worker node as labeled node
tunedNodeName, err := exutil.GetLastLinuxWorkerNode(oc)
o.Expect(err).NotTo(o.HaveOccurred())
//Get the tuned pod name in the same node that labeled node
//tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
//Re-delete the MCP, MC, and performance profile, and unlabel the node, in case the test case broke before the cleanup steps
defer exutil.DeleteMCAndMCPByName(oc, "50-nto-worker-hp", "worker-hp", 300)
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-hp-").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "hugepages", "-n", ntoNamespace, "--ignore-not-found").Execute()
exutil.By("Label the node with node-role.kubernetes.io/worker-hp=")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-hp=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create hugepages tuned profile")
exutil.ApplyOperatorResourceByYaml(oc, ntoNamespace, hugepageTunedBoottimeFile)
exutil.By("Check hugepages tuned profile should be automatically created")
tunedNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "tuned").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNames).To(o.ContainSubstring("hugepages"))
exutil.By("Create worker-hp machineconfigpool ...")
exutil.ApplyOperatorResourceByYaml(oc, ntoNamespace, hugepageMCPfile)
exutil.By("Assert if the MCP has been successfully applied ...")
exutil.AssertIfMCPChangesAppliedByName(oc, "worker-hp", 720)
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check if new NTO profile was applied")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "openshift-node-hugepages")
exutil.By("Check if profile openshift-node-hugepages applied on nodes")
nodeProfileName, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeProfileName).To(o.ContainSubstring("openshift-node-hugepages"))
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check value of allocatable.hugepages-2Mi in labled node ")
nodeHugePagesOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.status.allocatable.hugepages-2Mi}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeHugePagesOutput).To(o.ContainSubstring("100M"))
oc.SetupProject()
ntoTestNS := oc.Namespace()
//First choice is the [tests] image, which is mirrored by default in disconnected clusters;
//if the [tests] image is not available in some environments, we can use hello-openshift as the image.
//The tests imagestream usually ships with all OCP releases and the image is mirrored in disconnected clusters by default.
AppImageName := exutil.GetImagestreamImageName(oc, "tests")
if len(AppImageName) == 0 {
AppImageName = "quay.io/openshifttest/nginx-alpine@sha256:04f316442d48ba60e3ea0b5a67eb89b0b667abf1c198a3d0056ca748736336a0"
}
//Create a hugepages-app application pod
exutil.By("Create a hugepages-app pod to consume hugepage in nto temp namespace")
exutil.ApplyNsResourceFromTemplate(oc, ntoTestNS, "--ignore-unknown-parameters=true", "-f", hugepage100MPodFile, "-p", "IMAGENAME="+AppImageName)
//Check if hugepages-app is ready
exutil.By("Check if a hugepages-app pod is ready ...")
exutil.AssertPodToBeReady(oc, "hugepages-app", ntoTestNS)
exutil.By("Check the value of /etc/podinfo/hugepages_2M_request, the value expected is 105 ...")
podInfo, err := exutil.RemoteShPod(oc, ntoTestNS, "hugepages-app", "cat", "/etc/podinfo/hugepages_2M_request")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(podInfo).To(o.ContainSubstring("105"))
exutil.By("Check the value of REQUESTS_HUGEPAGES in env on pod ...")
envInfo, err := exutil.RemoteShPodWithBash(oc, ntoTestNS, "hugepages-app", "env | grep REQUESTS_HUGEPAGES")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(envInfo).To(o.ContainSubstring("REQUESTS_HUGEPAGES_2Mi=104857600"))
exutil.By("The right way to delete custom MC and MCP...")
oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-hp-").Execute()
exutil.DeleteMCAndMCPByName(oc, "50-nto-worker-hp", "worker-hp", 480)
})
g.It("NonPreRelease-Longduration-Author:liqcui-Medium-49439-NTO can start and stop stalld when relying on Tuned '[service]' plugin.[Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
if ManualPickup {
g.Skip("This is the test case that execute mannually in shared cluster ...")
}
//Use the first rhcos worker node as labeled node
tunedNodeName, err := exutil.GetFirstCoreOsWorkerNode(oc)
e2e.Logf("tunedNodeName is [ %v ]", tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
if len(tunedNodeName) == 0 {
g.Skip("Skip Testing on RHEL worker or windows node")
}
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-stalld-").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "openshift-stalld", "-n", ntoNamespace, "--ignore-not-found").Execute()
defer exutil.DebugNodeWithChroot(oc, tunedNodeName, "/usr/bin/throttlectl", "on")
exutil.By("Set off for /usr/bin/throttlectl before enable stalld")
switchThrottlectlOnOff(oc, ntoNamespace, tunedNodeName, "off", 30)
exutil.By("Label the node with node-role.kubernetes.io/worker-stalld=")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-stalld=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create openshift-stalld tuned profile")
exutil.CreateNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", stalldTunedFile, "-p", "STALLD_STATUS=start,enable")
exutil.By("Check openshift-stalld tuned profile should be automatically created")
tunedNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "tuned").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNames).To(o.ContainSubstring("openshift-stalld"))
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check if new NTO profile was applied")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "openshift-stalld")
exutil.By("Check if profile openshift-stalld applied on nodes")
nodeProfileName, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeProfileName).To(o.ContainSubstring("openshift-stalld"))
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check if stalld service is running ...")
stalldStatus, err := exutil.DebugNodeWithChroot(oc, tunedNodeName, "systemctl", "status", "stalld")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(stalldStatus).To(o.ContainSubstring("active (running)"))
exutil.By("Apply openshift-stalld with stop,disable tuned profile")
exutil.ApplyNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", stalldTunedFile, "-p", "STALLD_STATUS=stop,disable")
exutil.By("Check if new NTO profile was applied")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "openshift-stalld")
exutil.By("Check if stalld service is inactive and stopped ...")
stalldStatus, _ = exutil.DebugNodeWithOptionsAndChroot(oc, tunedNodeName, []string{"-q", "--to-namespace", ntoNamespace}, "systemctl", "status", "stalld")
o.Expect(stalldStatus).NotTo(o.BeEmpty())
o.Expect(stalldStatus).To(o.ContainSubstring("inactive (dead)"))
exutil.By("Apply openshift-stalld with start,enable tuned profile")
exutil.ApplyNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", stalldTunedFile, "-p", "STALLD_STATUS=start,enable")
exutil.By("Check if new NTO profile was applied")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "openshift-stalld")
exutil.By("Check if stalld service is running again ...")
stalldStatus, _, err = exutil.DebugNodeRetryWithOptionsAndChrootWithStdErr(oc, tunedNodeName, []string{"-q", "--to-namespace", ntoNamespace}, "systemctl", "status", "stalld")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(stalldStatus).NotTo(o.BeEmpty())
o.Expect(stalldStatus).To(o.ContainSubstring("active (running)"))
})
g.It("ROSA-OSD_CCS-NonHyperShiftHOST-Author:liqcui-Medium-49441-NTO Applying a profile with multiple inheritance where parents include a common ancestor. [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
//trying to include two profiles that share the same parent profile "throughput-performance". An example of such profiles
// are the openshift-node --> openshift --> (virtual-guest) --> throughput-performance and postgresql profiles.
//Use the first worker node as labeled node
isSNO := exutil.IsSNOCluster(oc)
//Prior to choose worker nodes with machineset
if exutil.IsMachineSetExist(oc) && !isSNO {
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
} else {
tunedNodeName, err = exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
}
//Get the tuned pod name in the same node that labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "tuned.openshift.io/openshift-node-postgresql-").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "openshift-node-postgresql", "-n", ntoNamespace, "--ignore-not-found").Execute()
exutil.By("Label the node with tuned.openshift.io/openshift-node-postgresql=")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "tuned.openshift.io/openshift-node-postgresql=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check postgresql profile /usr/lib/tuned/postgresql/tuned.conf include throughput-performance profile")
postGreSQLProfile, err := exutil.RemoteShPod(oc, ntoNamespace, tunedPodName, "cat", "/usr/lib/tuned/postgresql/tuned.conf")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(postGreSQLProfile).To(o.ContainSubstring("throughput-performance"))
exutil.By("Check postgresql profile /usr/lib/tuned/openshift-node/tuned.conf include openshift profile")
openshiftNodeProfile, err := exutil.RemoteShPod(oc, ntoNamespace, tunedPodName, "cat", "/usr/lib/tuned/openshift-node/tuned.conf")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(openshiftNodeProfile).To(o.ContainSubstring(`include=openshift`))
exutil.By("Check postgresql profile /usr/lib/tuned/openshift/tuned.conf include throughput-performance profile")
openshiftProfile, err := exutil.RemoteShPod(oc, ntoNamespace, tunedPodName, "cat", "/usr/lib/tuned/openshift/tuned.conf")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(openshiftProfile).To(o.ContainSubstring("throughput-performance"))
exutil.By("Create openshift-node-postgresql tuned profile")
exutil.ApplyOperatorResourceByYaml(oc, ntoNamespace, openshiftNodePostgresqlFile)
exutil.By("Check openshift-node-postgresql tuned profile should be automatically created")
tunedNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "tuned").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNames).To(o.ContainSubstring("openshift-node-postgresql"))
exutil.By("Check if new NTO profile was applied")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "openshift-node-postgresql")
exutil.By("Check if profile openshift-node-postgresql applied on nodes")
nodeProfileName, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeProfileName).To(o.ContainSubstring("openshift-node-postgresql"))
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Assert recommended profile (openshift-node-postgresql) matches current configuration in tuned pod log")
assertNTOPodLogsLastLines(oc, ntoNamespace, tunedPodName, "10", 300, `recommended profile \(openshift-node-postgresql\) matches current configuration|static tuning from profile 'openshift-node-postgresql' applied`)
})
g.It("NonHyperShiftHOST-Author:liqcui-Medium-49705-Tuned net plugin handle net devices with n/a value for a channel. [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed or hosted cluster - skipping test ...")
}
if iaasPlatform == "vsphere" || iaasPlatform == "openstack" || iaasPlatform == "none" || iaasPlatform == "powervs" {
g.Skip("IAAS platform: " + iaasPlatform + " doesn't support cloud provider profile - skipping test ...")
}
isSNO := exutil.IsSNOCluster(oc)
//Prior to choose worker nodes with machineset
if !isSNO {
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
} else {
tunedNodeName, err = exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
}
//Get the tuned pod name in the same node that labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
exutil.By("Check default channel for host network adapter, not expected Combined: 1, if so, skip testing ...")
//assertIFChannelQueuesStatus is used for checking if match Combined: 1
//If match <Combined: 1>, skip testing
isMatch := assertIFChannelQueuesStatus(oc, ntoNamespace, tunedNodeName)
if isMatch {
g.Skip("Only one NIC queues or Unsupported NIC - skipping test ...")
}
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("pod", tunedPodName, "-n", ntoNamespace, "node-role.kubernetes.io/netplugin-").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "net-plugin", "-n", ntoNamespace, "--ignore-not-found").Execute()
exutil.By("Label the node with node-role.kubernetes.io/netplugin=")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("pod", tunedPodName, "-n", ntoNamespace, "node-role.kubernetes.io/netplugin=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create net-plugin tuned profile")
exutil.ApplyOperatorResourceByYaml(oc, ntoNamespace, netPluginFile)
exutil.By("Check net-plugin tuned profile should be automatically created")
tunedNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "tuned").Output()
o.Expect(tunedNames).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNames).To(o.ContainSubstring("net-plugin"))
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(output).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Assert tuned.plugins.base: instance net: assigning devices match in tuned pod log")
assertNTOPodLogsLastLines(oc, ntoNamespace, tunedPodName, "180", 300, "tuned.plugins.base: instance net: assigning devices")
exutil.By("Assert active and recommended profile (net-plugin) match in tuned pod log")
assertNTOPodLogsLastLines(oc, ntoNamespace, tunedPodName, "180", 300, `profile 'net-plugin' applied|profile \(net-plugin\) match`)
exutil.By("Check if new NTO profile was applied")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "net-plugin")
exutil.By("Check if profile net-plugin applied on nodes")
nodeProfileName, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(nodeProfileName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeProfileName).To(o.ContainSubstring("net-plugin"))
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(output).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check channel for host network adapter, expected Combined: 1")
o.Expect(assertIFChannelQueuesStatus(oc, ntoNamespace, tunedNodeName)).To(o.BeTrue())
exutil.By("Delete tuned net-plugin and check channel for host network adapater again")
oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "net-plugin", "-n", ntoNamespace, "--ignore-not-found").Execute()
exutil.By("Check if profile openshift-node|openshift-control-plane applied on nodes")
if isSNO {
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "openshift-control-plane")
} else {
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "openshift-node")
}
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(output).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check channel for host network adapter, not expected Combined: 1")
o.Expect(assertIFChannelQueuesStatus(oc, ntoNamespace, tunedNodeName)).To(o.BeFalse())
})
g.It("ROSA-OSD_CCS-NonHyperShiftHOST-Author:liqcui-Medium-49617-NTO support cloud-provider specific profiles for NTO/TuneD. [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
if iaasPlatform == "none" {
g.Skip("IAAS platform: " + iaasPlatform + " doesn't support cloud provider profile - skipping test ...")
}
isSNO := exutil.IsSNOCluster(oc)
//Prior to choose worker nodes with machineset
if !isSNO {
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
} else {
tunedNodeName, err = exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
}
//Get the tuned pod name in the same node that labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
o.Expect(tunedPodName).NotTo(o.BeEmpty())
exutil.By("Get cloud provider name ...")
providerName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("profiles.tuned.openshift.io", tunedNodeName, "-n", ntoNamespace, "-ojsonpath={.spec.config.providerName}").Output()
o.Expect(providerName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "provider-"+providerName, "-n", ntoNamespace, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "provider-abc", "-n", ntoNamespace, "--ignore-not-found").Execute()
providerID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.spec.providerID}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(providerID).NotTo(o.BeEmpty())
o.Expect(providerID).To(o.ContainSubstring(providerName))
exutil.By("Check the value of vm.admin_reserve_kbytes on target nodes, the expected value should be 8192")
sysctlOutput, err := exutil.RemoteShPod(oc, ntoNamespace, tunedPodName, "sysctl", "vm.admin_reserve_kbytes")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(sysctlOutput).NotTo(o.BeEmpty())
o.Expect(sysctlOutput).To(o.ContainSubstring("vm.admin_reserve_kbytes = 8192"))
exutil.By("Apply cloud-provider profile ...")
exutil.ApplyNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", cloudProviderFile, "-p", "PROVIDER_NAME="+providerName)
exutil.By("Check /var/lib/tuned/provider on target nodes")
openshiftProfile, err := exutil.RemoteShPod(oc, ntoNamespace, tunedPodName, "cat", "/var/lib/ocp-tuned/provider")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(openshiftProfile).NotTo(o.BeEmpty())
o.Expect(openshiftProfile).To(o.ContainSubstring(providerName))
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(output).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check tuned for NTO")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "tuned.tuned.openshift.io").Output()
o.Expect(output).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current tuned for NTO: \n%v", output)
exutil.By("Check provider + providerName profile should be automatically created")
tunedNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "tuned").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNames).NotTo(o.BeEmpty())
o.Expect(tunedNames).To(o.ContainSubstring("provider-" + providerName))
exutil.By("Check the value of vm.admin_reserve_kbytes on target nodes, the expected value is 16386")
compareSpecifiedValueByNameOnLabelNodewithRetry(oc, ntoNamespace, tunedNodeName, "vm.admin_reserve_kbytes", "16386")
exutil.By("Remove cloud-provider profile, the value of vm.admin_reserve_kbytes rollback to 8192")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "provider-"+providerName, "-n", ntoNamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check the value of vm.admin_reserve_kbytes on target nodes, the expected value should be 8192")
compareSpecifiedValueByNameOnLabelNodewithRetry(oc, ntoNamespace, tunedNodeName, "vm.admin_reserve_kbytes", "8192")
exutil.By("Apply cloud-provider-abc profile,the abc doesn't belong to any cloud provider ...")
exutil.ApplyNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", cloudProviderFile, "-p", "PROVIDER_NAME=abc")
exutil.By("Check the value of vm.admin_reserve_kbytes on target nodes, the expected value should be no change, still is 8192")
compareSpecifiedValueByNameOnLabelNodewithRetry(oc, ntoNamespace, tunedNodeName, "vm.admin_reserve_kbytes", "8192")
})
g.It("Author:liqcui-Medium-45593-NTO Operator set io_timeout for AWS Nitro instances in correct way.[Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
// currently test is only supported on AWS
if iaasPlatform == "aws" {
exutil.By("Expected /sys/module/nvme_core/parameters/io_timeout value on each node is: 4294967295")
assertIOTimeOutandMaxRetries(oc, ntoNamespace)
} else {
g.Skip("Test Case 45593 doesn't support on other cloud platform, only support aws - skipping test ...")
}
})
g.It("Author:liqcui-Medium-27420-NTO Operator is providing default tuned.[Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
defaultTunedCreateTimeBefore, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("tuned", "default", "-n", ntoNamespace, "-ojsonpath={.metadata.creationTimestamp}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(defaultTunedCreateTimeBefore).NotTo(o.BeEmpty())
exutil.By("Delete the default tuned ...")
oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "default", "-n", ntoNamespace).Execute()
exutil.By("The make sure the tuned default created and ready")
confirmedTunedReady(oc, ntoNamespace, "default", 60)
defaultTunedCreateTimeAfter, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("tuned", "default", "-n", ntoNamespace, "-ojsonpath={.metadata.creationTimestamp}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(defaultTunedCreateTimeAfter).NotTo(o.BeEmpty())
o.Expect(defaultTunedCreateTimeAfter).NotTo(o.ContainSubstring(defaultTunedCreateTimeBefore))
defaultTunedCreateTimeBefore, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("tuned", "default", "-n", ntoNamespace, "-ojsonpath={.metadata.creationTimestamp}").Output()
o.Expect(defaultTunedCreateTimeBefore).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
defaultTunedCreateTimeAfter, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("tuned", "default", "-n", ntoNamespace, "-ojsonpath={.metadata.creationTimestamp}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(defaultTunedCreateTimeAfter).NotTo(o.BeEmpty())
o.Expect(defaultTunedCreateTimeAfter).To(o.ContainSubstring(defaultTunedCreateTimeBefore))
e2e.Logf("defaultTunedCreateTimeBefore is : %v defaultTunedCreateTimeAfter is: %v", defaultTunedCreateTimeBefore, defaultTunedCreateTimeAfter)
})
g.It("NonHyperShiftHOST-Author:liqcui-Medium-41552-NTO Operator Report per-node Tuned profile application status[Disruptive].", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
isSNO := exutil.IsSNOCluster(oc)
is3Master := exutil.Is3MasterNoDedicatedWorkerNode(oc)
masterNodeName := getFirstMasterNodeName(oc)
defaultMasterProfileName := getDefaultProfileNameOnMaster(oc, masterNodeName)
//NTO provides two default tuned profiles: openshift-control-plane and openshift-node
exutil.By("Check the default tuned profile list per nodes")
profileOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("profiles.tuned.openshift.io", "-n", ntoNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profileOutput).NotTo(o.BeEmpty())
if isSNO || is3Master {
o.Expect(profileOutput).To(o.ContainSubstring(defaultMasterProfileName))
} else {
o.Expect(profileOutput).To(o.ContainSubstring("openshift-control-plane"))
o.Expect(profileOutput).To(o.ContainSubstring("openshift-node"))
}
})
g.It("NonHyperShiftHOST-Author:liqcui-Medium-50052-NTO RHCOS-shipped stalld systemd units should use SCHED_FIFO to run stalld[Disruptive].", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
if iaasPlatform == "vsphere" || iaasPlatform == "none" {
g.Skip("IAAS platform: " + iaasPlatform + " doesn't support cloud provider profile - skipping test ...")
}
isSNO := exutil.IsSNOCluster(oc)
//Prior to choose worker nodes with machineset
if !isSNO {
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
} else {
tunedNodeName, err = exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
}
e2e.Logf("tunedNodeName is [ %v ]", tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
if len(tunedNodeName) == 0 {
g.Skip("Skip Testing on RHEL worker or windows node")
}
//Get the tuned pod name in the same node that labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
o.Expect(tunedPodName).NotTo(o.BeEmpty())
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-stalld-").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "openshift-stalld", "-n", ntoNamespace, "--ignore-not-found").Execute()
defer exutil.DebugNodeRetryWithOptionsAndChroot(oc, tunedNodeName, []string{"-q"}, "/usr/bin/throttlectl", "on")
//Switch off throttlectl to improve the success rate of starting stalld
exutil.By("Set off for /usr/bin/throttlectl before enable stalld")
switchThrottlectlOnOff(oc, ntoNamespace, tunedNodeName, "off", 30)
exutil.By("Label the node with node-role.kubernetes.io/worker-stalld=")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-stalld=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create openshift-stalld tuned profile")
exutil.CreateNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", stalldTunedFile, "-p", "STALLD_STATUS=start,enable")
exutil.By("Check openshift-stalld tuned profile should be automatically created")
tunedNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "tuned").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNames).NotTo(o.BeEmpty())
o.Expect(tunedNames).To(o.ContainSubstring("openshift-stalld"))
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).NotTo(o.BeEmpty())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check if new NTO profile was applied")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "openshift-stalld")
exutil.By("Check if profile openshift-stalld applied on nodes")
nodeProfileName, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeProfileName).NotTo(o.BeEmpty())
o.Expect(nodeProfileName).To(o.ContainSubstring("openshift-stalld"))
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).NotTo(o.BeEmpty())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check if stalld service is running ...")
stalldStatus, _, err := exutil.DebugNodeRetryWithOptionsAndChrootWithStdErr(oc, tunedNodeName, []string{"-q", "--to-namespace=" + ntoNamespace}, "systemctl", "status", "stalld")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(stalldStatus).NotTo(o.BeEmpty())
o.Expect(stalldStatus).To(o.ContainSubstring("active (running)"))
exutil.By("Get stalld PID on labeled node ...")
stalldPIDStatus, _, err := exutil.DebugNodeRetryWithOptionsAndChrootWithStdErr(oc, tunedNodeName, []string{"-q", "--to-namespace=" + ntoNamespace}, "/bin/bash", "-c", "ps -efZ | grep stalld | grep -v grep")
e2e.Logf("stalldPIDStatus is :\n%v", stalldPIDStatus)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(stalldPIDStatus).NotTo(o.BeEmpty())
o.Expect(stalldPIDStatus).NotTo(o.ContainSubstring("unconfined_service_t"))
o.Expect(stalldPIDStatus).To(o.ContainSubstring("-t 20"))
exutil.By("Get stalld PID on labeled node ...")
stalldPID, _, err := exutil.DebugNodeRetryWithOptionsAndChrootWithStdErr(oc, tunedNodeName, []string{"-q", "--to-namespace=" + ntoNamespace}, "/bin/bash", "-c", "ps -efL| grep stalld | grep -v grep | awk '{print $2}'")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(stalldPID).NotTo(o.BeEmpty())
exutil.By("Get status of chrt -p stalld PID on labeled node ...")
chrtStalldPIDOutput, _, err := exutil.DebugNodeRetryWithOptionsAndChrootWithStdErr(oc, tunedNodeName, []string{"-q", "--to-namespace=" + ntoNamespace}, "/bin/bash", "-c", "chrt -ap "+stalldPID)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(chrtStalldPIDOutput).NotTo(o.BeEmpty())
o.Expect(chrtStalldPIDOutput).To(o.ContainSubstring("SCHED_FIFO"))
e2e.Logf("chrtStalldPIDOutput is :\n%v", chrtStalldPIDOutput)
})
g.It("Longduration-NonPreRelease-Author:liqcui-Medium-51495-NTO PAO Shipped into NTO with basic function verification.[Disruptive][Slow].", func() {
var (
paoBaseProfileMCP = exutil.FixturePath("testdata", "psap", "pao", "pao-baseprofile-mcp.yaml")
paoBaseProfile = exutil.FixturePath("testdata", "psap", "pao", "pao-baseprofile.yaml")
paoBaseQoSPod = exutil.FixturePath("testdata", "psap", "pao", "pao-baseqos-pod.yaml")
)
if ManualPickup {
g.Skip("This is the test case that execute mannually in shared cluster ...")
}
// test requires NTO to be installed
isSNO := exutil.IsSNOCluster(oc)
if !isNTO || isSNO {
g.Skip("NTO is not installed or is Single Node Cluster- skipping test ...")
}
skipPAODeploy := skipDeployPAO(oc)
isPAOInstalled = exutil.IsPAOInstalled(oc)
if skipPAODeploy || isPAOInstalled {
e2e.Logf("PAO has been installed and continue to execute test case")
} else {
isPAOInOperatorHub := exutil.IsPAOInOperatorHub(oc)
if !isPAOInOperatorHub {
g.Skip("PAO is not in OperatorHub - skipping test ...")
}
exutil.InstallPAO(oc, paoNamespace)
}
defer exutil.DeleteMCAndMCPByName(oc, "50-nto-worker-pao", "worker-pao", 480)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("performanceprofile", "pao-baseprofile", "--ignore-not-found").Execute()
//Prior to choose worker nodes with machineset
if !isSNO {
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
} else {
tunedNodeName, err = exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
}
//Get how many CPUs are on the specified worker node
exutil.By("Get how many CPU cores the labeled worker node has")
nodeCPUCores, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.status.capacity.cpu}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeCPUCores).NotTo(o.BeEmpty())
nodeCPUCoresInt, err := strconv.Atoi(nodeCPUCores)
o.Expect(err).NotTo(o.HaveOccurred())
if nodeCPUCoresInt <= 1 {
g.Skip("the worker node don't have enough cpus - skipping test ...")
}
//Get the tuned pod name in the same node that labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
o.Expect(tunedPodName).NotTo(o.BeEmpty())
//Re-delete mcp, mc, performance profile and unlabel the node, just in case the test case breaks before the cleanup steps
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-pao-").Execute()
exutil.By("Label the node with node-role.kubernetes.io/worker-pao=")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-pao=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// currently the realtime kernel part is only enabled on AWS and GCP with amd64
ocpArch, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.status.nodeInfo.architecture}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if (iaasPlatform == "aws" || iaasPlatform == "gcp") && ocpArch == "amd64" {
//Only GCP and AWS support the realtime kernel
exutil.By("Apply pao-baseprofile performance profile")
exutil.ApplyClusterResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", paoBaseProfile, "-p", "ISENABLED=true")
} else {
exutil.By("Apply pao-baseprofile performance profile")
exutil.ApplyClusterResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", paoBaseProfile, "-p", "ISENABLED=false")
}
exutil.By("Check Performance Profile pao-baseprofile was created automatically")
paoBasePerformanceProfile, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("performanceprofile").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(paoBasePerformanceProfile).NotTo(o.BeEmpty())
o.Expect(paoBasePerformanceProfile).To(o.ContainSubstring("pao-baseprofile"))
exutil.By("Create machine config pool worker-pao")
exutil.ApplyOperatorResourceByYaml(oc, "", paoBaseProfileMCP)
exutil.By("Assert if machine config pool applied for worker nodes")
exutil.AssertIfMCPChangesAppliedByName(oc, "worker-pao", 1200)
exutil.By("Check openshift-node-performance-pao-baseprofile tuned profile should be automatically created")
tunedNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "tuned").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNames).To(o.ContainSubstring("openshift-node-performance-pao-baseprofile"))
exutil.By("Check current profile openshift-node-performance-pao-baseprofile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check if new NTO profile openshift-node-performance-pao-baseprofile was applied")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "openshift-node-performance-pao-baseprofile")
exutil.By("Check if profile openshift-node-performance-pao-baseprofile applied on nodes")
nodeProfileName, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeProfileName).To(o.ContainSubstring("openshift-node-performance-pao-baseprofile"))
exutil.By("Check value of allocatable.hugepages-1Gi in labled node ")
nodeHugePagesOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.status.allocatable.hugepages-1Gi}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeHugePagesOutput).To(o.ContainSubstring("1Gi"))
exutil.By("Check Settings of CPU Manager policy created by PAO in labled node ")
cpuManagerConfOutput, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "chroot", "/host", "/bin/bash", "-c", "cat /etc/kubernetes/kubelet.conf |grep cpuManager").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(cpuManagerConfOutput).NotTo(o.BeEmpty())
o.Expect(cpuManagerConfOutput).To(o.ContainSubstring("cpuManagerPolicy"))
o.Expect(cpuManagerConfOutput).To(o.ContainSubstring("cpuManagerReconcilePeriod"))
e2e.Logf("The settings of CPU Manager Policy on labeled nodes: \n%v", cpuManagerConfOutput)
exutil.By("Check Settings of CPU Manager for reservedSystemCPUs created by PAO in labled node ")
cpuManagerConfOutput, err = oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "chroot", "/host", "/bin/bash", "-c", "cat /etc/kubernetes/kubelet.conf |grep reservedSystemCPUs").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(cpuManagerConfOutput).NotTo(o.BeEmpty())
o.Expect(cpuManagerConfOutput).To(o.ContainSubstring("reservedSystemCPUs"))
e2e.Logf("The settings of CPU Manager reservedSystemCPUs on labeled nodes: \n%v", cpuManagerConfOutput)
exutil.By("Check Settings of Topology Manager for topologyManagerPolicy created by PAO in labled node ")
topologyManagerConfOutput, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "chroot", "/host", "/bin/bash", "-c", "cat /etc/kubernetes/kubelet.conf |grep topologyManagerPolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(topologyManagerConfOutput).NotTo(o.BeEmpty())
o.Expect(topologyManagerConfOutput).To(o.ContainSubstring("topologyManagerPolicy"))
e2e.Logf("The settings of CPU Manager topologyManagerPolicy on labeled nodes: \n%v", topologyManagerConfOutput)
// the realtime kernel check applies only on AWS and GCP with amd64
if (iaasPlatform == "aws" || iaasPlatform == "gcp") && ocpArch == "amd64" {
exutil.By("Check realTime kernel setting that created by PAO in labled node ")
realTimekernalOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-owide").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(realTimekernalOutput).NotTo(o.BeEmpty())
o.Expect(realTimekernalOutput).To(o.Or(o.ContainSubstring("rt")))
} else {
exutil.By("Check realTime kernel setting that created by PAO in labled node ")
realTimekernalOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-owide").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(realTimekernalOutput).NotTo(o.BeEmpty())
o.Expect(realTimekernalOutput).NotTo(o.Or(o.ContainSubstring("rt")))
}
exutil.By("Check runtimeClass setting that created by PAO ... ")
runtimeClassOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("performanceprofile", "pao-baseprofile", "-ojsonpath={.status.runtimeClass}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(runtimeClassOutput).NotTo(o.BeEmpty())
o.Expect(runtimeClassOutput).To(o.ContainSubstring("performance-pao-baseprofile"))
e2e.Logf("The settings of runtimeClass on labeled nodes: \n%v", runtimeClassOutput)
exutil.By("Check allocable system resouce on labeled node ... ")
allocableResource, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.status.allocatable}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(allocableResource).NotTo(o.BeEmpty())
e2e.Logf("The allocable system resouce on labeled node: \n%v", allocableResource)
oc.SetupProject()
ntoTestNS := oc.Namespace()
//Create a guaranteed-pod application pod
exutil.By("Create a guaranteed-pod pod into temp namespace")
exutil.ApplyOperatorResourceByYaml(oc, ntoTestNS, paoBaseQoSPod)
//Check if guaranteed-pod is ready
exutil.By("Check if a guaranteed-pod pod is ready ...")
exutil.AssertPodToBeReady(oc, "guaranteed-pod", ntoTestNS)
exutil.By("Check the cpu bind to isolation CPU zone for a guaranteed-pod")
cpuManagerStateOutput, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "chroot", "/host", "/bin/bash", "-c", "cat /var/lib/kubelet/cpu_manager_state").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(cpuManagerStateOutput).NotTo(o.BeEmpty())
o.Expect(cpuManagerStateOutput).To(o.ContainSubstring("guaranteed-pod"))
e2e.Logf("The settings of CPU Manager cpuManagerState on labeled nodes: \n%v", cpuManagerStateOutput)
//The custom MC and MCP must be deleted in the correct sequence: unlabel the node first so it returns to the worker MCP, then delete the MC and MCP.
//Otherwise the MCP stays in a degraded state, which affects other test cases that use MCPs.
exutil.By("Delete custom MC and MCP by following correct logic ...")
oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-pao-").Execute()
exutil.DeleteMCAndMCPByName(oc, "50-nto-worker-pao", "worker-pao", 480)
})
g.It("NonHyperShiftHOST-Author:liqcui-Medium-53053-NTO will automatically delete profile with unknown/stuck state. [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
if iaasPlatform == "none" {
g.Skip("IAAS platform: " + iaasPlatform + " doesn't support cloud provider profile - skipping test ...")
}
var (
ntoUnknownProfile = exutil.FixturePath("testdata", "psap", "nto", "nto-unknown-profile.yaml")
)
//Get NTO operator pod name
ntoOperatorPod, err := getNTOPodName(oc, ntoNamespace)
o.Expect(ntoOperatorPod).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
isSNO := exutil.IsSNOCluster(oc)
//Prior to choose worker nodes with machineset
if !isSNO {
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
} else {
tunedNodeName, err = exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("Get cloud provider name ...")
providerName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("profiles.tuned.openshift.io", tunedNodeName, "-n", ntoNamespace, "-ojsonpath={.spec.config.providerName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(providerName).NotTo(o.BeEmpty())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("profiles.tuned.openshift.io", "worker-does-not-exist-openshift-node", "-n", ntoNamespace, "--ignore-not-found").Execute()
exutil.By("Apply worker-does-not-exist-openshift-node profile ...")
exutil.ApplyNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", ntoUnknownProfile, "-p", "PROVIDER_NAME="+providerName)
exutil.By("The profile worker-does-not-exist-openshift-node will be deleted automatically once created.")
tunedNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(tunedNames).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNames).NotTo(o.ContainSubstring("worker-does-not-exist-openshift-node"))
exutil.By("Assert NTO logs to match key words Node 'worker-does-not-exist-openshift-node' not found")
assertNTOPodLogsLastLines(oc, ntoNamespace, ntoOperatorPod, "4", 120, " Node \"worker-does-not-exist-openshift-node\" not found")
})
g.It("NonPreRelease-Longduration-Author:liqcui-Medium-59884-NTO Cgroup Blacklist multiple regular expression. [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
oc.SetupProject()
ntoTestNS := oc.Namespace()
//Get the name of the last Linux worker node
tunedNodeName, err := exutil.GetLastLinuxWorkerNode(oc)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
//First choice to use [tests] image, the image mirrored by default in disconnected cluster
//if don't have [tests] image in some environment, we can use hello-openshift as image
//usually test imagestream shipped in all ocp and mirror the image in disconnected cluster by default
// AppImageName := exutil.GetImagestreamImageName(oc, "tests")
// if len(AppImageName) == 0 {
AppImageName := "quay.io/openshifttest/nginx-alpine@sha256:04f316442d48ba60e3ea0b5a67eb89b0b667abf1c198a3d0056ca748736336a0"
// }
//Get how many CPUs are on the specified worker node
exutil.By("Get how many CPU cores the labeled worker node has")
nodeCPUCores, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.status.capacity.cpu}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeCPUCores).NotTo(o.BeEmpty())
nodeCPUCoresInt, err := strconv.Atoi(nodeCPUCores)
o.Expect(err).NotTo(o.HaveOccurred())
if nodeCPUCoresInt <= 1 {
g.Skip("the worker node don't have enough cpus - skipping test ...")
}
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
o.Expect(tunedPodName).NotTo(o.BeEmpty())
exutil.By("Remove custom profile (if not already removed) and remove node label")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "-n", ntoNamespace, "cgroup-scheduler-blacklist").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "tuned-scheduler-node-").Execute()
exutil.By("Label the specified linux node with label tuned-scheduler-node")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "tuned-scheduler-node=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// setting cgroup_ps_blacklist=/kubepods\.slice/kubepods-burstable\.slice/;/system\.slice/
// processes that belong to /kubepods\.slice/kubepods-burstable\.slice/ or /system\.slice/ can consume the whole cpuset
// The expected Cpus_allowed_list in /proc/$PID/status should be 0-N
// processes that don't belong to /kubepods\.slice/kubepods-burstable\.slice/ or /system\.slice/ are restricted by the scheduler plugin
// The expected Cpus_allowed_list in /proc/$PID/status should be 0 or 0,2-N
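//For reference, a minimal sketch of what the cgroup-scheduler-blacklist profile is assumed to carry in its
//[scheduler] section (the actual template lives in cgroupSchedulerBacklist and may differ):
//  [scheduler]
//  cgroup_ps_blacklist=/kubepods\.slice/kubepods-burstable\.slice/;/system\.slice/
//Processes whose cgroup matches the blacklist are left alone by the scheduler plugin's affinity tuning.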
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "-n", ntoTestNS, "app-web", "--ignore-not-found").Execute()
exutil.By("Create pod that deletect the value of kernel.pid_max ")
exutil.ApplyNsResourceFromTemplate(oc, ntoTestNS, "--ignore-unknown-parameters=true", "-f", cgroupSchedulerBestEffortPod, "-p", "IMAGE_NAME="+AppImageName)
//Check if nginx pod is ready
exutil.By("Check if best effort pod is ready...")
exutil.AssertPodToBeReady(oc, "app-web", ntoTestNS)
exutil.By("Create NTO custom tuned profile cgroup-scheduler-blacklist")
exutil.ApplyNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", cgroupSchedulerBacklist, "-p", "PROFILE_NAME=cgroup-scheduler-blacklist", `CGROUP_BLACKLIST=/kubepods\.slice/kubepods-burstable\.slice/;/system\.slice/`)
exutil.By("Check if NTO custom tuned profile cgroup-scheduler-blacklist was applied")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "cgroup-scheduler-blacklist")
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
// The expected Cpus_allowed_list in /proc/$PID/status should be 0-N
exutil.By("Verified the cpu allow list in cgroup black list for tuned ...")
o.Expect(assertProcessInCgroupSchedulerBlacklist(oc, tunedNodeName, ntoNamespace, "tuned", nodeCPUCoresInt)).To(o.Equal(true))
// The expected Cpus_allowed_list in /proc/$PID/status should be 0-N
exutil.By("Verified the cpu allow list in cgroup black list for chronyd ...")
o.Expect(assertProcessInCgroupSchedulerBlacklist(oc, tunedNodeName, ntoNamespace, "chronyd", nodeCPUCoresInt)).To(o.Equal(true))
// The expected Cpus_allowed_list in /proc/$PID/status should be 0 or 0,2-N
exutil.By("Verified the cpu allow list in cgroup black list for nginx process...")
o.Expect(assertProcessNOTInCgroupSchedulerBlacklist(oc, tunedNodeName, ntoNamespace, "nginx| tail -1", nodeCPUCoresInt)).To(o.Equal(true))
})
g.It("Longduration-NonPreRelease-Author:liqcui-Medium-60743-NTO No race to update MC when nodes with different number of CPUs are in the same MCP. [Disruptive] [Slow]", func() {
// test requires NTO to be installed
isSNO := exutil.IsSNOCluster(oc)
if !isNTO || isSNO {
g.Skip("NTO is not installed or is Single Node Cluster- skipping test ...")
}
haveMachineSet := exutil.IsMachineSetExist(oc)
if !haveMachineSet {
g.Skip("No machineset found, skipping test ...")
}
// currently test is only supported on AWS, GCP, Azure, ibmcloud, alibabacloud
supportPlatforms := []string{"aws", "gcp", "azure", "ibmcloud", "alibabacloud"}
if !exutil.ImplStringArrayContains(supportPlatforms, iaasPlatform) {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
//Prior to choose worker nodes with machineset
if !isSNO {
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
} else {
tunedNodeName, err = exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
}
//Get NTO Operator Pod Name
ntoOperatorPodName := getNTOOperatorPodName(oc, ntoNamespace)
//Re-delete mcp, mc and unlabel the node, just in case the test case breaks before the cleanup steps
defer exutil.DeleteMCAndMCPByName(oc, "50-nto-worker-diffcpus", "worker-diffcpus", 480)
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-diffcpus-").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "openshift-bootcmdline-cpu", "-n", ntoNamespace, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("machineset", "ocp-psap-qe-diffcpus", "-n", "openshift-machine-api", "--ignore-not-found").Execute()
exutil.By("Create openshift-bootcmdline-cpu tuned profile")
exutil.ApplyOperatorResourceByYaml(oc, ntoNamespace, nodeDiffCPUsTunedBootFile)
exutil.By("Create machine config pool")
exutil.ApplyClusterResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", nodeDiffCPUsMCPFile, "-p", "MCP_NAME=worker-diffcpus")
exutil.By("Label the last node with node-role.kubernetes.io/worker-diffcpus=")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-diffcpus=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create a new machineset with different instance type.")
newMachinesetInstanceType := exutil.SpecifyMachinesetWithDifferentInstanceType(oc)
e2e.Logf("4 newMachinesetInstanceType is %v, ", newMachinesetInstanceType)
o.Expect(newMachinesetInstanceType).NotTo(o.BeEmpty())
exutil.CreateMachinesetbyInstanceType(oc, "ocp-psap-qe-diffcpus", newMachinesetInstanceType)
exutil.By("Wait for new node is ready when machineset created")
//1 means replicas=1
clusterinfra.WaitForMachinesRunning(oc, 1, "ocp-psap-qe-diffcpus")
exutil.By("Label the second node with node-role.kubernetes.io/worker-diffcpus=")
secondTunedNodeName := exutil.GetNodeNameByMachineset(oc, "ocp-psap-qe-diffcpus")
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", secondTunedNodeName, "node-role.kubernetes.io/worker-diffcpus-", "--overwrite").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", secondTunedNodeName, "node-role.kubernetes.io/worker-diffcpus=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Assert if the status of adding the two worker node into worker-diffcpus mcp, mcp applied")
exutil.AssertIfMCPChangesAppliedByName(oc, "worker-diffcpus", 480)
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Assert if openshift-bootcmdline-cpu profile was applied ...")
//Verify if the new profile is applied
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "openshift-bootcmdline-cpu")
profileCheck, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profileCheck).To(o.Equal("openshift-bootcmdline-cpu"))
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
assertNTOPodLogsLastLines(oc, ntoNamespace, ntoOperatorPodName, "25", 180, "Nodes in MCP worker-diffcpus agree on bootcmdline: cpus=")
//Previously commented out due to a known issue, until it was fixed
exutil.By("Assert if cmdline was applied in machineconfig...")
AssertTunedAppliedMC(oc, "nto-worker-diffcpus", "cpus=")
exutil.By("Assert if cmdline was applied in labled node...")
o.Expect(AssertTunedAppliedToNode(oc, tunedNodeName, "cpus=")).To(o.Equal(true))
exutil.By("<Profiles with bootcmdline conflict> warn message will show in oc get co/node-tuning")
assertCoStatusWithKeywords(oc, "Profiles with bootcmdline conflict")
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
//Verify that the <Profiles with bootcmdline conflict> warning message disappears after removing the custom tuned profile
exutil.By("Delete openshift-bootcmdline-cpu tuned on the labeled node...")
oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "openshift-bootcmdline-cpu", "-n", ntoNamespace, "--ignore-not-found").Execute()
//The custom mc and mcp must be deleted by correct sequence, unlabel first and labeled node return to worker mcp, then delete mc and mcp
//otherwise the mcp will keep degrade state, it will affected other test case that use mcp
exutil.By("Removing custom MC and MCP from mcp worker-diffcpus...")
oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-diffcpus-").Execute()
//Remove the node from the worker-diffcpus MCP
//To save time, delete the machineset instead of unlabeling the secondTunedNodeName node
oc.AsAdmin().WithoutNamespace().Run("delete").Args("machineset", "ocp-psap-qe-diffcpus", "-n", "openshift-machine-api", "--ignore-not-found").Execute()
oc.AsAdmin().WithoutNamespace().Run("label").Args("node", secondTunedNodeName, "node-role.kubernetes.io/worker-diffcpus-").Execute()
exutil.By("Assert if first worker node return to worker mcp")
exutil.AssertIfMCPChangesAppliedByName(oc, "worker", 480)
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).NotTo(o.BeEmpty())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("<Profiles with bootcmdline conflict> warn message will disappear after removing worker node from mcp worker-diffcpus")
assertCONodeTuningStatusWithoutWARNWithRetry(oc, 180, "Profiles with bootcmdline conflict")
exutil.By("Assert if isolcpus was applied in labled node...")
o.Expect(AssertTunedAppliedToNode(oc, tunedNodeName, "cpus=")).To(o.Equal(false))
})
g.It("Author:liqcui-Medium-63223-NTO support tuning sysctl and kernel bools that applied to all nodes of nodepool-level settings in hypershift.", func() {
//This is a ROSA HCP pre-defined case that only checks results; the ROSA team creates the NTO tuned profile when the ROSA HCP cluster is created, hence no Disruptive label
//Only execute on ROSA hosted cluster
isROSA := isROSAHostedCluster(oc)
if !isROSA {
g.Skip("It's not ROSA hosted cluster - skipping test ...")
}
//In the ROSA environment we cannot access the management cluster, so, as agreed with the ROSA team,
//the ROSA team creates a pre-defined configmap and applies it to the specified nodepool with a hardcoded profile name.
//NTO only checks whether all settings are applied to the worker nodes of the hosted cluster.
exutil.By("Check if the tuned hc-nodepool-vmdratio is created in hosted cluster nodepool")
tunedNameList, err := oc.AsAdmin().Run("get").Args("tuned", "-n", ntoNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNameList).NotTo(o.BeEmpty())
e2e.Logf("The list of tuned tunedNameList is: \n%v", tunedNameList)
o.Expect(tunedNameList).To(o.And(o.ContainSubstring("hc-nodepool-vmdratio"),
o.ContainSubstring("tuned-hugepages")))
appliedProfileList, err := oc.AsAdmin().Run("get").Args("profiles.tuned.openshift.io", "-n", ntoNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(appliedProfileList).NotTo(o.BeEmpty())
o.Expect(appliedProfileList).To(o.And(o.ContainSubstring("hc-nodepool-vmdratio"),
o.ContainSubstring("openshift-node-hugepages")))
exutil.By("Get the node name that applied to the profile hc-nodepool-vmdratio")
tunedNodeNameStdOut, err := oc.AsAdmin().Run("get").Args("profiles.tuned.openshift.io", "-n", ntoNamespace, `-ojsonpath='{.items[?(@..status.tunedProfile=="hc-nodepool-vmdratio")].metadata.name}'`).Output()
tunedNodeName := strings.Trim(tunedNodeNameStdOut, "'")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
exutil.By("Assert the value of sysctl vm.dirty_ratio, the expecte value should be 55")
debugNodeStdout, err := oc.AsAdmin().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "chroot", "/host", "sysctl", "vm.dirty_ratio").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The value of sysctl vm.dirty_ratio on node %v is: \n%v\n", tunedNodeName, debugNodeStdout)
o.Expect(debugNodeStdout).To(o.ContainSubstring("vm.dirty_ratio = 55"))
exutil.By("Get the node name that applied to the profile openshift-node-hugepages")
tunedNodeNameStdOut, err = oc.AsAdmin().Run("get").Args("profiles.tuned.openshift.io", "-n", ntoNamespace, `-ojsonpath='{.items[?(@..status.tunedProfile=="openshift-node-hugepages")].metadata.name}'`).Output()
tunedNodeName = strings.Trim(tunedNodeNameStdOut, "'")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
exutil.By("Assert the value of cat /proc/cmdline, the expecte value should be hugepagesz=2M hugepages=50")
debugNodeStdout, err = oc.AsAdmin().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "chroot", "/host", "cat", "/proc/cmdline").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The value of /proc/cmdline on node %v is: \n%v\n", tunedNodeName, debugNodeStdout)
o.Expect(debugNodeStdout).To(o.ContainSubstring("hugepagesz=2M hugepages=50"))
})
g.It("ROSA-NonHyperShiftHOST-Author:sahshah-Medium-64908-NTO Expose tuned socket interface.[Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
exutil.By("Pick one worker node to label")
tunedNodeName, err := exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
//Clean up resources
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "-n", ntoNamespace, "tuning-maxpid").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-tuning-").Execute()
//Label the node with node-role.kubernetes.io/worker-tuning
exutil.By("Label the node with node-role.kubernetes.io/worker-tuning=")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-tuning=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
//Get the tuned pod name in the same node that labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
//Apply new profile that match label node-role.kubernetes.io/worker-tuning=
exutil.By("Create tuning-maxpid profile")
exutil.ApplyOperatorResourceByYaml(oc, ntoNamespace, tuningMaxPidFile)
//NTO provides default tuned resources out of the box; the custom tuning-maxpid tuned should appear alongside them
exutil.By("Check the tuned list, expecting tuning-maxpid")
allTuneds, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("tuned", "-n", ntoNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(allTuneds).To(o.ContainSubstring("tuning-maxpid"))
exutil.By("Check if new profile tuning-maxpid applied to labeled node")
//Verify if the new profile is applied
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "tuning-maxpid")
profileCheck, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profileCheck).To(o.Equal("tuning-maxpid"))
exutil.By("Get current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check the custom profile as expected by debugging the node ")
printfString := fmt.Sprintf(`printf '{"jsonrpc": "2.0", "method": "active_profile", "id": 1}' | nc -U /run/tuned/tuned.sock`)
printfStringStdOut, err := exutil.RemoteShPodWithBash(oc, ntoNamespace, tunedPodName, printfString)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(printfStringStdOut).NotTo(o.BeEmpty())
o.Expect(printfStringStdOut).To(o.ContainSubstring("tuning-maxpid"))
e2e.Logf("printfStringStdOut is :\n%v", printfStringStdOut)
})
g.It("ROSA-NonHyperShiftHOST-Author:liqcui-Medium-65371-NTO TuneD prevent from reverting node level profiles on termination [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
//Use the last worker node as labeled node
var (
tunedNodeName string
err error
)
isSNO := exutil.IsSNOCluster(oc)
if !isSNO {
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
} else {
tunedNodeName, err = exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
}
//Get the tuned pod name in the same node that labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
oc.SetupProject()
ntoTestNS := oc.Namespace()
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-tuning-").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "tuning-pidmax", "-n", ntoNamespace, "--ignore-not-found").Execute()
ntoRes := ntoResource{
name: "tuning-pidmax",
namespace: ntoNamespace,
template: ntoTunedPidMax,
sysctlparm: "kernel.pid_max",
sysctlvalue: "181818",
}
exutil.By("Label the node with node-role.kubernetes.io/worker-tuning=")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-tuning=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create tuning-pidmax profile")
exutil.ApplyOperatorResourceByYaml(oc, ntoNamespace, ntoTunedPidMax)
exutil.By("Create tuning-pidmax profile tuning-pidmax applied to nodes")
ntoRes.assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "tuning-pidmax", "True")
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
AppImageName := exutil.GetImagestreamImageName(oc, "tests")
clusterVersion, _, err := exutil.GetClusterVersion(oc)
e2e.Logf("Current clusterVersion is [ %v ]", clusterVersion)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(clusterVersion).NotTo(o.BeEmpty())
exutil.By("Create pod that deletect the value of kernel.pid_max ")
exutil.ApplyNsResourceFromTemplate(oc, ntoTestNS, "--ignore-unknown-parameters=true", "-f", podSysctlFile, "-p", "IMAGE_NAME="+AppImageName, "RUNASNONROOT=true")
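//The sysctlpod template is assumed to run a loop that periodically prints the current kernel.pid_max value to stdout,
//so the expected value can be read back later with oc logs --tail.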
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
//Check if sysctlpod pod is ready
exutil.AssertPodToBeReady(oc, "sysctlpod", ntoTestNS)
exutil.By("Get the sysctlpod status")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoTestNS, "pods").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The status of pod sysctlpod: \n%v", output)
exutil.By("Check the the value of kernel.pid_max in the pod sysctlpod, the expected value should be kernel.pid_max = 181818")
podLogStdout, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("sysctlpod", "--tail=1", "-n", ntoTestNS).Output()
e2e.Logf("Logs of sysctlpod before delete tuned pod is [ %v ]", podLogStdout)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(podLogStdout).NotTo(o.BeEmpty())
o.Expect(podLogStdout).To(o.ContainSubstring("kernel.pid_max = 181818"))
exutil.By("Delete tuned pod on the labeled node, and make sure the kernel.pid_max don't revert to origin value")
o.Expect(oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", tunedPodName, "-n", ntoNamespace).Execute()).NotTo(o.HaveOccurred())
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check tuned pod status after delete tuned pod")
//Get the tuned pod name in the same node that labeled node
tunedPodName = getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
//Check if tuned pod that deleted is ready
exutil.AssertPodToBeReady(oc, tunedPodName, ntoNamespace)
exutil.By("Check the the value of kernel.pid_max in the pod sysctlpod again, the expected value still be kernel.pid_max = 181818")
podLogStdout, err = oc.AsAdmin().WithoutNamespace().Run("logs").Args("sysctlpod", "--tail=2", "-n", ntoTestNS).Output()
e2e.Logf("Logs of sysctlpod after delete tuned pod is [ %v ]", podLogStdout)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(podLogStdout).NotTo(o.BeEmpty())
o.Expect(podLogStdout).To(o.ContainSubstring("kernel.pid_max = 181818"))
o.Expect(podLogStdout).NotTo(o.ContainSubstring("kernel.pid_max not equal 181818"))
})
g.It("Longduration-NonPreRelease-PreChkUpgrade-Author:liqcui-Medium-49618-TELCO N-1 - Pre Check for PAO shipped with NTO to support upgrade.[Telco][Disruptive][Slow].", func() {
var (
paoBaseProfileMCP = exutil.FixturePath("testdata", "psap", "pao", "pao-baseprofile-mcp.yaml")
paoBaseProfile = exutil.FixturePath("testdata", "psap", "pao", "pao-baseprofile.yaml")
)
isSNO := exutil.IsSNOCluster(oc)
if !isNTO || isSNO {
g.Skip("NTO is not installed or is Single Node Cluster- skipping test ...")
}
// currently test is only supported on AWS, GCP, Azure, ibmcloud, alibabacloud
supportPlatforms := []string{"aws", "gcp", "azure", "ibmcloud", "alibabacloud"}
if !exutil.ImplStringArrayContains(supportPlatforms, iaasPlatform) || isSNO {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
totalLinuxWorkerNode := exutil.CountLinuxWorkerNodeNumByOS(oc)
totalLinuxWorkerNodes := strconv.Itoa(totalLinuxWorkerNode)
if totalLinuxWorkerNode < 3 {
g.Skip("The total linux worker node is " + totalLinuxWorkerNodes + ". The OCP do not have enough worker node, skip it.")
}
tunedNodeName := choseOneWorkerNodeToRunCase(oc, 0)
//Get how many cpus on the specified worker node
exutil.By("Get the number of cpus cores on the labeled worker node")
nodeCPUCores, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.status.capacity.cpu}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeCPUCores).NotTo(o.BeEmpty())
nodeCPUCoresInt, err := strconv.Atoi(nodeCPUCores)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current cpus cores of worker node is %v", nodeCPUCoresInt)
if nodeCPUCoresInt < 4 {
g.Skip("the worker node doesn't have enough cpus - skipping test ...")
}
//Get the tuned pod name in the same node that labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
o.Expect(tunedPodName).NotTo(o.BeEmpty())
exutil.By("Label the node with node-role.kubernetes.io/worker-pao=")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-pao=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create machine config pool worker-pao")
exutil.ApplyOperatorResourceByYaml(oc, "", paoBaseProfileMCP)
exutil.AssertIfMCPChangesAppliedByName(oc, "worker-pao", 300)
// currently test is only supported on AWS, GCP, and Azure
ocpArch, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.status.nodeInfo.architecture}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if (iaasPlatform == "aws" || iaasPlatform == "gcp") && ocpArch == "amd64" {
//Only GCP and AWS support the realtime kernel
exutil.By("Apply pao-baseprofile performance profile")
exutil.ApplyClusterResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", paoBaseProfile, "-p", "ISENABLED=true")
} else {
exutil.By("Apply pao-baseprofile performance profile")
exutil.ApplyClusterResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", paoBaseProfile, "-p", "ISENABLED=false")
}
exutil.By("Check Performance Profile pao-baseprofile was created automatically")
paoBasePerformanceProfile, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("performanceprofile").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(paoBasePerformanceProfile).NotTo(o.BeEmpty())
o.Expect(paoBasePerformanceProfile).To(o.ContainSubstring("pao-baseprofile"))
exutil.By("Assert if machine config pool applied to worker nodes that label with worker-pao")
exutil.AssertIfMCPChangesAppliedByName(oc, "worker-pao", 1800)
exutil.AssertIfMCPChangesAppliedByName(oc, "worker", 300)
exutil.AssertIfMCPChangesAppliedByName(oc, "master", 720)
exutil.By("Check openshift-node-performance-pao-baseprofile tuned profile should be automatically created")
tunedNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "tuned").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNames).To(o.ContainSubstring("openshift-node-performance-pao-baseprofile"))
exutil.By("Check current profile openshift-node-performance-pao-baseprofile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check if new NTO profile openshift-node-performance-pao-baseprofile was applied")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "openshift-node-performance-pao-baseprofile")
exutil.By("Check if profile openshift-node-performance-pao-baseprofile applied on nodes")
nodeProfileName, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeProfileName).To(o.ContainSubstring("openshift-node-performance-pao-baseprofile"))
exutil.By("Check value of allocatable.hugepages-1Gi in labled node ")
nodeHugePagesOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.status.allocatable.hugepages-1Gi}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeHugePagesOutput).To(o.ContainSubstring("1Gi"))
exutil.By("Check Settings of CPU Manager policy created by PAO in labled node ")
cpuManagerConfOutput, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "chroot", "/host", "/bin/bash", "-c", "cat /etc/kubernetes/kubelet.conf |grep cpuManager").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(cpuManagerConfOutput).NotTo(o.BeEmpty())
o.Expect(cpuManagerConfOutput).To(o.ContainSubstring("cpuManagerPolicy"))
o.Expect(cpuManagerConfOutput).To(o.ContainSubstring("cpuManagerReconcilePeriod"))
e2e.Logf("The settings of CPU Manager Policy on labeled nodes: \n%v", cpuManagerConfOutput)
exutil.By("Check Settings of CPU Manager for reservedSystemCPUs created by PAO in labled node ")
cpuManagerConfOutput, err = oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "chroot", "/host", "/bin/bash", "-c", "cat /etc/kubernetes/kubelet.conf |grep reservedSystemCPUs").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(cpuManagerConfOutput).NotTo(o.BeEmpty())
o.Expect(cpuManagerConfOutput).To(o.ContainSubstring("reservedSystemCPUs"))
e2e.Logf("The settings of CPU Manager reservedSystemCPUs on labeled nodes: \n%v", cpuManagerConfOutput)
exutil.By("Check Settings of Topology Manager for topologyManagerPolicy created by PAO in labled node ")
topologyManagerConfOutput, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "chroot", "/host", "/bin/bash", "-c", "cat /etc/kubernetes/kubelet.conf |grep topologyManagerPolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(topologyManagerConfOutput).NotTo(o.BeEmpty())
o.Expect(topologyManagerConfOutput).To(o.ContainSubstring("topologyManagerPolicy"))
e2e.Logf("The settings of CPU Manager topologyManagerPolicy on labeled nodes: \n%v", topologyManagerConfOutput)
// currently test is only supported on AWS, GCP, and Azure
if (iaasPlatform == "aws" || iaasPlatform == "gcp") && ocpArch == "amd64" {
exutil.By("Check realTime kernel setting that created by PAO in labled node ")
realTimekernalOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-owide").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(realTimekernalOutput).NotTo(o.BeEmpty())
o.Expect(realTimekernalOutput).To(o.Or(o.ContainSubstring("rt")))
} else {
exutil.By("Check realTime kernel setting that created by PAO in labled node ")
realTimekernalOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-owide").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(realTimekernalOutput).NotTo(o.BeEmpty())
o.Expect(realTimekernalOutput).NotTo(o.Or(o.ContainSubstring("rt")))
}
exutil.By("Check runtimeClass setting that created by PAO ... ")
runtimeClassOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("performanceprofile", "pao-baseprofile", "-ojsonpath={.status.runtimeClass}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(runtimeClassOutput).NotTo(o.BeEmpty())
o.Expect(runtimeClassOutput).To(o.ContainSubstring("performance-pao-baseprofile"))
e2e.Logf("The settings of runtimeClass on labeled nodes: \n%v", runtimeClassOutput)
exutil.By("Check Kernel boot settings passed into /proc/cmdline in labled node ")
kernelCMDLineStdout, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "chroot", "/host", "cat", "/proc/cmdline").Output()
e2e.Logf("The settings of Kernel boot passed into /proc/cmdline on labeled nodes: \n%v", kernelCMDLineStdout)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(kernelCMDLineStdout).NotTo(o.BeEmpty())
o.Expect(kernelCMDLineStdout).To(o.ContainSubstring("tsc=reliable"))
o.Expect(kernelCMDLineStdout).To(o.ContainSubstring("isolcpus="))
o.Expect(kernelCMDLineStdout).To(o.ContainSubstring("hugepagesz=1G"))
//o.Expect(kernelCMDLineStdout).To(o.ContainSubstring("nosmt"))
// - nosmt removed nosmt to improve succeed rate due to limited cpu cores
// but manually renabled when have enough cpu cores
})
g.It("Longduration-NonPreRelease-PstChkUpgrade-Author:liqcui-Medium-49618-TELCO N-1 - Post Check for PAO shipped with NTO to support upgrade.[Telco][Disruptive][Slow].", func() {
if !isNTO {
g.Skip("NTO is not installed or is Single Node Cluster- skipping test ...")
}
isSNO := exutil.IsSNOCluster(oc)
// currently test is only supported on AWS, GCP, Azure, ibmcloud, alibabacloud
supportPlatforms := []string{"aws", "gcp", "azure", "ibmcloud", "alibabacloud"}
if !exutil.ImplStringArrayContains(supportPlatforms, iaasPlatform) || isSNO {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
totalLinuxWorkerNode := exutil.CountLinuxWorkerNodeNumByOS(oc)
totalLinuxWorkerNodes := strconv.Itoa(totalLinuxWorkerNode)
if totalLinuxWorkerNode < 3 {
g.Skip("The total linux worker node is " + totalLinuxWorkerNodes + ". The OCP do not have enough worker node, skip it.")
}
tunedNodeName, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-l", "node-role.kubernetes.io/worker-pao", "-ojsonpath={.items[*].metadata.name}").Output()
if len(tunedNodeName) == 0 {
g.Skip("No labeled node was found, skipping testing ...")
} else {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-pao-").Execute()
}
defer exutil.DeleteMCAndMCPByName(oc, "50-nto-worker-pao", "worker-pao", 480)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("performanceprofile", "pao-baseprofile", "--ignore-not-found").Execute()
exutil.By("Check If Performance Profile pao-baseprofile and cloud-provider exist during Post Check Phase")
paoBasePerformanceProfile, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("performanceprofile").Output()
if !strings.Contains(paoBasePerformanceProfile, "pao-baseprofile") {
g.Skip("No Performancerofile found skipping test ...")
}
//Get the tuned pod name in the same node that labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
o.Expect(tunedPodName).NotTo(o.BeEmpty())
exutil.By("Assert if machine config pool applied for worker nodes")
exutil.AssertIfMCPChangesAppliedByName(oc, "worker-pao", 1200)
exutil.By("Check openshift-node-performance-pao-baseprofile tuned profile should be automatically created")
tunedNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "tuned").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNames).To(o.ContainSubstring("openshift-node-performance-pao-baseprofile"))
exutil.By("Check current profile openshift-node-performance-pao-baseprofile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check if new NTO profile openshift-node-performance-pao-baseprofile was applied")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "openshift-node-performance-pao-baseprofile")
exutil.By("Check if profile openshift-node-performance-pao-baseprofile applied on nodes")
nodeProfileName, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeProfileName).To(o.ContainSubstring("openshift-node-performance-pao-baseprofile"))
exutil.By("Check value of allocatable.hugepages-1Gi in labled node ")
nodeHugePagesOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.status.allocatable.hugepages-1Gi}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeHugePagesOutput).To(o.ContainSubstring("1Gi"))
exutil.By("Check Settings of CPU Manager policy created by PAO in labled node ")
cpuManagerConfOutput, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "chroot", "/host", "/bin/bash", "-c", "cat /etc/kubernetes/kubelet.conf |grep cpuManager").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(cpuManagerConfOutput).NotTo(o.BeEmpty())
o.Expect(cpuManagerConfOutput).To(o.ContainSubstring("cpuManagerPolicy"))
o.Expect(cpuManagerConfOutput).To(o.ContainSubstring("cpuManagerReconcilePeriod"))
e2e.Logf("The settings of CPU Manager Policy on labeled nodes: \n%v", cpuManagerConfOutput)
exutil.By("Check Settings of CPU Manager for reservedSystemCPUs created by PAO in labled node ")
cpuManagerConfOutput, err = oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "chroot", "/host", "/bin/bash", "-c", "cat /etc/kubernetes/kubelet.conf |grep reservedSystemCPUs").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(cpuManagerConfOutput).NotTo(o.BeEmpty())
o.Expect(cpuManagerConfOutput).To(o.ContainSubstring("reservedSystemCPUs"))
e2e.Logf("The settings of CPU Manager reservedSystemCPUs on labeled nodes: \n%v", cpuManagerConfOutput)
exutil.By("Check Settings of Topology Manager for topologyManagerPolicy created by PAO in labled node ")
topologyManagerConfOutput, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "chroot", "/host", "/bin/bash", "-c", "cat /etc/kubernetes/kubelet.conf |grep topologyManagerPolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(topologyManagerConfOutput).NotTo(o.BeEmpty())
o.Expect(topologyManagerConfOutput).To(o.ContainSubstring("topologyManagerPolicy"))
e2e.Logf("The settings of CPU Manager topologyManagerPolicy on labeled nodes: \n%v", topologyManagerConfOutput)
// currently test is only supported on AWS, GCP, and Azure
ocpArch, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.status.nodeInfo.architecture}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if (iaasPlatform == "aws" || iaasPlatform == "gcp") && ocpArch == "amd64" {
exutil.By("Check realTime kernel setting that created by PAO in labled node ")
realTimekernalOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-owide").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(realTimekernalOutput).NotTo(o.BeEmpty())
o.Expect(realTimekernalOutput).To(o.Or(o.ContainSubstring("rt")))
} else {
exutil.By("Check realTime kernel setting that created by PAO in labled node ")
realTimekernalOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-owide").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(realTimekernalOutput).NotTo(o.BeEmpty())
o.Expect(realTimekernalOutput).NotTo(o.Or(o.ContainSubstring("rt")))
}
exutil.By("Check runtimeClass setting that created by PAO ... ")
runtimeClassOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("performanceprofile", "pao-baseprofile", "-ojsonpath={.status.runtimeClass}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(runtimeClassOutput).NotTo(o.BeEmpty())
o.Expect(runtimeClassOutput).To(o.ContainSubstring("performance-pao-baseprofile"))
e2e.Logf("The settings of runtimeClass on labeled nodes: \n%v", runtimeClassOutput)
exutil.By("Check Kernel boot settings passed into /proc/cmdline in labled node ")
kernelCMDLineStdout, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "chroot", "/host", "cat", "/proc/cmdline").Output()
e2e.Logf("The settings of Kernel boot passed into /proc/cmdline on labeled nodes: \n%v", kernelCMDLineStdout)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(kernelCMDLineStdout).NotTo(o.BeEmpty())
o.Expect(kernelCMDLineStdout).To(o.ContainSubstring("tsc=reliable"))
o.Expect(kernelCMDLineStdout).To(o.ContainSubstring("isolcpus="))
o.Expect(kernelCMDLineStdout).To(o.ContainSubstring("hugepagesz=1G"))
//o.Expect(kernelCMDLineStdout).To(o.ContainSubstring("nosmt"))
// - nosmt removed nosmt to improve succeed rate due to limited cpu cores
// but manually renabled when have enough cpu cores
//The custom mc and mcp must be deleted by correct sequence, unlabel first and labeled node return to worker mcp, then delete mc and mcp
//otherwise the mcp will keep degrade state, it will affected other test case that use mcp
exutil.By("Delete custom MC and MCP by following correct logic ...")
oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-pao-").Execute()
exutil.DeleteMCAndMCPByName(oc, "50-nto-worker-pao", "worker-pao", 480)
exutil.AssertIfMCPChangesAppliedByName(oc, "worker", 600)
})
g.It("NonPreRelease-PreChkUpgrade-Author:liqcui-Medium-21995-Pre Check for basic NTO function to Upgrade OCP Cluster[Disruptive].", func() {
// currently test is only supported on AWS, GCP, Azure, ibmcloud, alibabacloud
supportPlatforms := []string{"aws", "gcp", "azure", "ibmcloud", "alibabacloud"}
if !exutil.ImplStringArrayContains(supportPlatforms, iaasPlatform) || !isNTO {
g.Skip("NTO is not installed or IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
tunedNodeName := choseOneWorkerNodeToRunCase(oc, 1)
paoNodeName, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-l", "node-role.kubernetes.io/worker-pao", "-ojsonpath={.items[*].metadata.name}").Output()
if len(tunedNodeName) == 0 || tunedNodeName == paoNodeName {
g.Skip("No suitable worker node was found in : " + iaasPlatform + " - skipping test ...")
}
exutil.By("Label the node with node-role.kubernetes.io/worker-tuning=")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-tuning=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
//Get the tuned pod name in the same node that labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
o.Expect(tunedPodName).NotTo(o.BeEmpty())
ntoRes := ntoResource{
name: "tuning-pidmax",
namespace: ntoNamespace,
template: ntoSysctlTemplate,
sysctlparm: "kernel.pid_max",
sysctlvalue: "282828",
label: "node-role.kubernetes.io/worker-tuning",
}
exutil.By("Create tuning-pidmax profile")
ntoRes.applyNTOTunedProfile(oc)
exutil.By("Create tuning-pidmax profile tuning-pidmax applied to nodes")
ntoRes.assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "tuning-pidmax", "True")
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Compare if the value kernel.pid_max in on labeled node, should be 282828")
compareSpecifiedValueByNameOnLabelNodewithRetry(oc, ntoNamespace, tunedNodeName, "kernel.pid_max", "282828")
exutil.By("Get cloud provider name ...")
providerName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("profiles.tuned.openshift.io", tunedNodeName, "-n", ntoNamespace, "-ojsonpath={.spec.config.providerName}").Output()
o.Expect(providerName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
providerID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.spec.providerID}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(providerID).NotTo(o.BeEmpty())
o.Expect(providerID).To(o.ContainSubstring(providerName))
exutil.By("Apply cloud-provider profile ...")
exutil.ApplyNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", cloudProviderFile, "-p", "PROVIDER_NAME="+providerName)
exutil.By("Check provider + providerName profile should be automatically created")
tunedNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "tuned").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNames).NotTo(o.BeEmpty())
o.Expect(tunedNames).To(o.ContainSubstring("provider-" + providerName))
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check the value of vm.admin_reserve_kbytes on target nodes, the expected value is 16386")
compareSpecifiedValueByNameOnLabelNodewithRetry(oc, ntoNamespace, tunedNodeName, "vm.admin_reserve_kbytes", "16386")
})
g.It("NonPreRelease-PstChkUpgrade-Author:liqcui-Medium-21995-Post Check for basic NTO function to Upgrade OCP Cluster[Disruptive]", func() {
// currently test is only supported on AWS, GCP, Azure, ibmcloud, alibabacloud
supportPlatforms := []string{"aws", "gcp", "azure", "ibmcloud", "alibabacloud"}
if !exutil.ImplStringArrayContains(supportPlatforms, iaasPlatform) || !isNTO {
g.Skip("NTO is not installed or IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
tunedNodeName, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-l", "node-role.kubernetes.io/worker-tuning", "-ojsonpath={.items[*].metadata.name}").Output()
if len(tunedNodeName) == 0 {
g.Skip("No suitable worker node was found in : " + iaasPlatform + " - skipping test ...")
}
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-tuning-").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "tuning-pidmax", "-n", ntoNamespace, "--ignore-not-found").Execute()
//Get the tuned pod name in the same node that labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
o.Expect(tunedPodName).NotTo(o.BeEmpty())
exutil.By("Get cloud provider name ...")
providerName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("profiles.tuned.openshift.io", tunedNodeName, "-n", ntoNamespace, "-ojsonpath={.spec.config.providerName}").Output()
o.Expect(providerName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "provider-"+providerName, "-n", ntoNamespace, "--ignore-not-found").Execute()
ntoRes := ntoResource{
name: "tuning-pidmax",
namespace: ntoNamespace,
template: ntoSysctlTemplate,
sysctlparm: "kernel.pid_max",
sysctlvalue: "282828",
label: "node-role.kubernetes.io/worker-tuning",
}
exutil.By("Create tuning-pidmax profile and apply it to nodes")
ntoRes.assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "tuning-pidmax", "True")
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Compare if the value kernel.pid_max in on labeled node, should be 282828")
compareSpecifiedValueByNameOnLabelNodewithRetry(oc, ntoNamespace, tunedNodeName, "kernel.pid_max", "282828")
providerID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.spec.providerID}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(providerID).NotTo(o.BeEmpty())
o.Expect(providerID).To(o.ContainSubstring(providerName))
exutil.By("Apply cloud-provider profile ...")
exutil.ApplyNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", cloudProviderFile, "-p", "PROVIDER_NAME="+providerName)
exutil.By("Check provider + providerName profile should be automatically created")
tunedNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "tuned").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNames).NotTo(o.BeEmpty())
o.Expect(tunedNames).To(o.ContainSubstring("provider-" + providerName))
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check the value of vm.admin_reserve_kbytes on target nodes, the expected value is 16386")
compareSpecifiedValueByNameOnLabelNodewithRetry(oc, ntoNamespace, tunedNodeName, "vm.admin_reserve_kbytes", "16386")
//Clean nto resource after upgrade
oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-tuning-").Execute()
oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "tuning-pidmax", "-n", ntoNamespace, "--ignore-not-found").Execute()
oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "provider-"+providerName, "-n", ntoNamespace, "--ignore-not-found").Execute()
})
g.It("Author:liqcui-Medium-74507-NTO openshift-node-performance-uuid have the same priority warning keeps printing[Disruptive]", func() {
isSNO := exutil.IsSNOCluster(oc)
var firstNodeName string
var secondNodeName string
if !isNTO || isSNO {
g.Skip("NTO is not installed or is Single Node Cluster- skipping test ...")
}
machinesetName := getTotalLinuxMachinesetNum(oc)
e2e.Logf("len(machinesetName) is %v", machinesetName)
if machinesetName > 1 {
firstNodeName = choseOneWorkerNodeToRunCase(oc, 0)
secondNodeName = choseOneWorkerNodeToRunCase(oc, 1)
} else {
firstNodeName = choseOneWorkerNodeNotByMachineset(oc, 0)
secondNodeName = choseOneWorkerNodeNotByMachineset(oc, 1)
}
firstNodeLabel := exutil.GetNodeListByLabel(oc, "node-role.kubernetes.io/worker-tuning")
secondNodeLabel := exutil.GetNodeListByLabel(oc, "node-role.kubernetes.io/worker-priority18")
if len(firstNodeLabel) == 0 {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", firstNodeName, "node-role.kubernetes.io/worker-tuning-").Execute()
}
if len(secondNodeLabel) == 0 {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", secondNodeName, "node-role.kubernetes.io/worker-priority18-").Execute()
}
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "tuning-pidmax", "-n", ntoNamespace, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "tuning-dirtyratio", "-n", ntoNamespace, "--ignore-not-found").Execute()
//Get the NTO operator pod name
ntoOperatorPodName := getNTOOperatorPodName(oc, ntoNamespace)
o.Expect(ntoOperatorPodName).NotTo(o.BeEmpty())
exutil.By("Pickup two worker nodes to label node to worker-tuning and worker-priority18 ...")
if len(firstNodeLabel) == 0 {
oc.AsAdmin().WithoutNamespace().Run("label").Args("node", firstNodeName, "node-role.kubernetes.io/worker-tuning=").Execute()
}
if len(secondNodeLabel) == 0 {
oc.AsAdmin().WithoutNamespace().Run("label").Args("node", secondNodeName, "node-role.kubernetes.io/worker-priority18=").Execute()
}
firstNTORes := ntoResource{
name: "tuning-pidmax",
namespace: ntoNamespace,
template: ntoSysctlTemplate,
sysctlparm: "kernel.pid_max",
sysctlvalue: "282828",
label: "node-role.kubernetes.io/worker-tuning",
}
secondNTORes := ntoResource{
name: "tuning-dirtyratio",
namespace: ntoNamespace,
template: ntoSysctlTemplate,
sysctlparm: "vm.dirty_ratio",
sysctlvalue: "56",
label: "node-role.kubernetes.io/worker-priority18",
}
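//Both custom profiles are rendered from the same sysctl template and are expected to end up with the same priority;
//the NTO operator log is checked further below to confirm the "same priority" warning is not printed repeatedly.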
exutil.By("Create tuning-pidmax profile")
firstNTORes.applyNTOTunedProfile(oc)
exutil.By("Create tuning-dirtyratio profile")
secondNTORes.applyNTOTunedProfile(oc)
exutil.By("Create tuning-pidmax profile and apply it to nodes")
firstNTORes.assertIfTunedProfileApplied(oc, ntoNamespace, firstNodeName, "tuning-pidmax", "True")
exutil.By("Create tuning-dirtyratio profile and apply it to nodes")
secondNTORes.assertIfTunedProfileApplied(oc, ntoNamespace, secondNodeName, "tuning-dirtyratio", "True")
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Compare if the value kernel.pid_max in on labeled node, should be 282828")
compareSpecifiedValueByNameOnLabelNodewithRetry(oc, ntoNamespace, firstNodeName, "kernel.pid_max", "282828")
exutil.By("Compare if the value kernel.pid_max in on labeled node, should be 282828")
compareSpecifiedValueByNameOnLabelNodewithRetry(oc, ntoNamespace, secondNodeName, "vm.dirty_ratio", "56")
exutil.By("Assert the log contains recommended profile (nf-conntrack-max) matches current configuratio ")
ntoOperatorPodLogs, _ := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", ntoNamespace, ntoOperatorPodName, "--tail=50").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(ntoOperatorPodLogs).NotTo(o.BeEmpty())
o.Expect(ntoOperatorPodLogs).NotTo(o.ContainSubstring("same priority"))
})
g.It("Author:liqcui-Longduration-NonPreRelease-Medium-75555-NTO Tuned pod should starts before workload pods on reboot[Disruptive][Slow].", func() {
var (
paoBaseProfileMCP = exutil.FixturePath("testdata", "psap", "pao", "pao-baseprofile-mcp.yaml")
paoBaseProfile = exutil.FixturePath("testdata", "psap", "pao", "pao-baseprofile.yaml")
)
// test requires NTO to be installed
isSNO := exutil.IsSNOCluster(oc)
if !isNTO || isSNO {
g.Skip("NTO is not installed or is Single Node Cluster- skipping test ...")
}
skipPAODeploy := skipDeployPAO(oc)
isPAOInstalled = exutil.IsPAOInstalled(oc)
if skipPAODeploy || isPAOInstalled {
e2e.Logf("PAO has been installed and continue to execute test case")
} else {
isPAOInOperatorHub := exutil.IsPAOInOperatorHub(oc)
if !isPAOInOperatorHub {
g.Skip("PAO is not in OperatorHub - skipping test ...")
}
exutil.InstallPAO(oc, paoNamespace)
}
//Prefer to choose a worker node that is managed by a machineset
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
//Get how many cpus on the specified worker node
exutil.By("Get how many cpus cores on the labeled worker node")
nodeCPUCores, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.status.capacity.cpu}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeCPUCores).NotTo(o.BeEmpty())
nodeCPUCoresInt, err := strconv.Atoi(nodeCPUCores)
o.Expect(err).NotTo(o.HaveOccurred())
if nodeCPUCoresInt <= 1 {
g.Skip("the worker node don't have enough cpus - skipping test ...")
}
//Get the tuned pod name in the same node that labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
o.Expect(tunedPodName).NotTo(o.BeEmpty())
//Re-delete the MCP, MC and performance profile and unlabel the node, in case the test case broke before the cleanup steps
defer func() {
exutil.DeleteMCAndMCPByName(oc, "50-nto-worker-pao", "worker-pao", 480)
oc.AsAdmin().WithoutNamespace().Run("delete").Args("performanceprofile", "pao-baseprofile", "--ignore-not-found").Execute()
}()
labeledNode := exutil.GetNodeListByLabel(oc, "node-role.kubernetes.io/worker-pao")
if len(labeledNode) == 0 {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-pao-").Execute()
exutil.By("Label the node with node-role.kubernetes.io/worker-pao=")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-pao=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
// currently test is only supported on AWS, GCP, and Azure
ocpArch, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.status.nodeInfo.architecture}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if (iaasPlatform == "aws" || iaasPlatform == "gcp") && ocpArch == "amd64" {
//Only GCP and AWS support the realtime kernel
exutil.By("Apply pao-baseprofile performance profile")
exutil.ApplyClusterResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", paoBaseProfile, "-p", "ISENABLED=true")
} else {
exutil.By("Apply pao-baseprofile performance profile")
exutil.ApplyClusterResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", paoBaseProfile, "-p", "ISENABLED=false")
}
exutil.By("Check Performance Profile pao-baseprofile was created automatically")
paoBasePerformanceProfile, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("performanceprofile").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(paoBasePerformanceProfile).NotTo(o.BeEmpty())
o.Expect(paoBasePerformanceProfile).To(o.ContainSubstring("pao-baseprofile"))
exutil.By("Create machine config pool worker-pao")
exutil.ApplyOperatorResourceByYaml(oc, "", paoBaseProfileMCP)
exutil.By("Assert if machine config pool applied for worker nodes")
exutil.AssertIfMCPChangesAppliedByName(oc, "worker-pao", 1200)
exutil.By("Check openshift-node-performance-pao-baseprofile tuned profile should be automatically created")
tunedNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "tuned").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNames).To(o.ContainSubstring("openshift-node-performance-pao-baseprofile"))
exutil.By("Check current profile openshift-node-performance-pao-baseprofile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check if new NTO profile openshift-node-performance-pao-baseprofile was applied")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "openshift-node-performance-pao-baseprofile")
exutil.By("Check if profile openshift-node-performance-pao-baseprofile applied on nodes")
nodeProfileName, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeProfileName).To(o.ContainSubstring("openshift-node-performance-pao-baseprofile"))
//$ systemctl status ocp-tuned-one-shot.service
// ocp-tuned-one-shot.service - TuneD service from NTO image
// ..
// Active: inactive (dead) since Thu 2024-06-20 14:29:32 UTC; 5min ago
// note that the one-shot TuneD service started and finished before kubelet
//systemctl status exits non-zero when ocp-tuned-one-shot.service is inactive, so an error from that command is expected.
exutil.By("Check if end time of ocp-tuned-one-shot.service prior to startup time of kubelet service")
//supported property name
// 0.InactiveExitTimestampMonotonic
// 1.ExecMainStartTimestampMonotonic
// 2.ActiveEnterTimestampMonotonic
// 3.StateChangeTimestampMonotonic
// 4.ActiveExitTimestampMonotonic
// 5.InactiveEnterTimestampMonotonic
// 6.ConditionTimestampMonotonic
// 7.AssertTimestampMonotonic
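//Equivalent manual check on the node (illustrative):
// systemctl show ocp-tuned-one-shot.service -p InactiveExitTimestampMonotonic
// systemctl show kubelet.service -p ExecMainStartTimestampMonotonic
//The monotonic values are in microseconds, so a larger value means a later event; the test expects kubelet's main
//process start timestamp to be greater than the exit timestamp of ocp-tuned-one-shot.service.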
inactiveExitTimestampMonotonicOfOCPTunedOneShotService := exutil.ShowSystemctlPropertyValueOfServiceUnitByName(oc, tunedNodeName, ntoNamespace, "ocp-tuned-one-shot.service", "InactiveExitTimestampMonotonic")
ocpTunedOneShotServiceStatusInactiveExitTimestamp := exutil.GetSystemctlServiceUnitTimestampByPropertyNameWithMonotonic(inactiveExitTimestampMonotonicOfOCPTunedOneShotService)
execMainStartTimestampMonotonicOfKubelet := exutil.ShowSystemctlPropertyValueOfServiceUnitByName(oc, tunedNodeName, ntoNamespace, "kubelet.service", "ExecMainStartTimestampMonotonic")
kubeletServiceStatusExecMainStartTimestamp := exutil.GetSystemctlServiceUnitTimestampByPropertyNameWithMonotonic(execMainStartTimestampMonotonicOfKubelet)
e2e.Logf("ocpTunedOneShotServiceStatusInactiveExitTimestamp is: %v, kubeletServiceStatusActiveEnterTimestamp is: %v", ocpTunedOneShotServiceStatusInactiveExitTimestamp, kubeletServiceStatusExecMainStartTimestamp)
o.Expect(kubeletServiceStatusExecMainStartTimestamp).To(o.BeNumerically(">", ocpTunedOneShotServiceStatusInactiveExitTimestamp))
})
g.It("Author:liqcui-Longduration-NonPreRelease-Medium-75435-NTO deferred feature with annotation deferred update[Disruptive]", func() {
isSNO := exutil.IsSNOCluster(oc)
if !isNTO || isSNO {
g.Skip("NTO is not installed or is Single Node Cluster- skipping test ...")
}
machinesetName := getTotalLinuxMachinesetNum(oc)
e2e.Logf("len(machinesetName) is %v", machinesetName)
if machinesetName > 1 {
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
} else {
tunedNodeName = choseOneWorkerNodeNotByMachineset(oc, 0)
}
labeledNode := exutil.GetNodeListByLabel(oc, "deferred-update")
if len(labeledNode) == 0 {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "deferred-update-").Execute()
}
defer func() {
oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "deferred-update-profile", "-n", ntoNamespace, "--ignore-not-found").Execute()
compareSpecifiedValueByNameOnLabelNodewithRetry(oc, ntoNamespace, tunedNodeName, "kernel.shmmni", "4096")
}()
//Get the tuned pod name in the same node that labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
o.Expect(tunedPodName).NotTo(o.BeEmpty())
exutil.By("Pickup one worker nodes to label node to deferred-update ...")
if len(labeledNode) == 0 {
oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "deferred-update=").Execute()
}
defferedNTORes := ntoResource{
name: "deferred-update-profile",
namespace: ntoNamespace,
template: ntoDefered,
sysctlparm: "kernel.shmmni",
sysctlvalue: "8192",
label: "deferred-update",
deferedValue: "update",
}
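//The deferedValue field is assumed to set the tuned deferred annotation (here "update"), meaning in-place changes
//to an already-applied profile are postponed until the next node restart.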
exutil.By("Create deferred-update profile")
defferedNTORes.applyNTOTunedProfileWithDeferredAnnotation(oc)
exutil.By("Create deferred-update profile and apply it to nodes")
defferedNTORes.assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "deferred-update-profile", "True")
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Compare if the value kernel.shmmni in on labeled node, should be 8192")
compareSpecifiedValueByNameOnLabelNodewithRetry(oc, ntoNamespace, tunedNodeName, "kernel.shmmni", "8192")
exutil.By("Path tuned with new value of kernel.shmmni to 10240")
patchTunedProfile(oc, ntoNamespace, "deferred-update-profile", ntoDeferedUpdatePatch)
exutil.By("Path the tuned profile with a new value, the new value take effective after node reboot")
defferedNTORes.assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "deferred-update-profile", "False")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profile.tuned.openshift.io", tunedNodeName, `-ojsonpath='{.status.conditions[0].message}'`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).NotTo(o.BeEmpty())
o.Expect(output).To(o.ContainSubstring("The TuneD daemon profile is waiting for the next node restart"))
exutil.By("Reboot the node with updated tuned profile")
err = oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", ntoNamespace, "-it", tunedPodName, "--", "reboot").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertIfMCPChangesAppliedByName(oc, "worker", 600)
exutil.By("Compare if the value kernel.shmmni in on labeled node, should be 10240")
compareSpecifiedValueByNameOnLabelNodewithRetry(oc, ntoNamespace, tunedNodeName, "kernel.shmmni", "10240")
exutil.By("Removed deffered tuned custom profile and unlabel node")
defferedNTORes.delete(oc)
exutil.By("Compare if the value kernel.shmmni in on labeled node, it will rollback to 4096")
compareSpecifiedValueByNameOnLabelNodewithRetry(oc, ntoNamespace, tunedNodeName, "kernel.shmmni", "4096")
})
g.It("Author:liqcui-Longduration-NonPreRelease-Medium-77764-NTO - Failure to pull NTO image preventing startup of ocp-tuned-one-shot.service[Disruptive]", func() {
isSNO := exutil.IsSNOCluster(oc)
if !isNTO || isSNO {
g.Skip("NTO is not installed or is Single Node Cluster- skipping test ...")
}
var (
ntoDisableHttpsMCPFile = exutil.FixturePath("testdata", "psap", "nto", "disable-https-mcp.yaml")
ntoDisableHttpsPPFile = exutil.FixturePath("testdata", "psap", "nto", "disable-https-pp.yaml")
)
proxyStdOut, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("proxy", "cluster", "-ojsonpath={.spec.httpsProxy}").Output()
e2e.Logf("proxyStdOut is %v", proxyStdOut)
o.Expect(err).NotTo(o.HaveOccurred())
if len(proxyStdOut) == 0 {
g.Skip("No proxy in the cluster - skipping test ...")
}
machinesetName := getTotalLinuxMachinesetNum(oc)
e2e.Logf("len(machinesetName) is %v", machinesetName)
if machinesetName > 1 {
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
} else {
tunedNodeName = choseOneWorkerNodeNotByMachineset(oc, 0)
}
//Get how many cpus on the specified worker node
exutil.By("Get how many cpus cores on the labeled worker node")
nodeCPUCores, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.status.capacity.cpu}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeCPUCores).NotTo(o.BeEmpty())
nodeCPUCoresInt, err := strconv.Atoi(nodeCPUCores)
o.Expect(err).NotTo(o.HaveOccurred())
if nodeCPUCoresInt <= 1 {
g.Skip("the worker node don't have enough cpus - skipping test ...")
}
labeledNode := exutil.GetNodeListByLabel(oc, "node-role.kubernetes.io/worker-nohttps")
defer func() {
oc.AsAdmin().WithoutNamespace().Run("delete").Args("mcp", "worker-nohttps", "--ignore-not-found").Execute()
oc.AsAdmin().WithoutNamespace().Run("delete").Args("PerformanceProfile", "performance", "-n", ntoNamespace, "--ignore-not-found").Execute()
}()
if len(labeledNode) == 0 {
defer func() {
oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-nohttps-").Execute()
//make sure the labeled node returns to the worker mcp
exutil.AssertIfMCPChangesAppliedByName(oc, "worker", 720)
}()
}
//Get the tuned pod name in the same node that labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
o.Expect(tunedPodName).NotTo(o.BeEmpty())
exutil.By("Pickup one worker nodes to label node to worker-nohttps ...")
if len(labeledNode) == 0 {
oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-nohttps=").Execute()
}
exutil.ApplyOperatorResourceByYaml(oc, ntoNamespace, ntoDisableHttpsMCPFile)
exutil.By("Remove NTO image on label node")
stdOut, err := exutil.DebugNodeRetryWithOptionsAndChroot(oc, tunedNodeName, []string{"-q"}, "/bin/bash", "-c", ". /var/lib/ocp-tuned/image.env;podman rmi $NTO_IMAGE --force")
e2e.Logf("removed NTO image is %v", stdOut)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Apply pao performance profile")
exutil.ApplyOperatorResourceByYaml(oc, ntoNamespace, ntoDisableHttpsPPFile)
exutil.AssertIfMCPChangesAppliedByName(oc, "worker-nohttps", 720)
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
//An inactive status makes systemctl status exit non-zero for ocp-tuned-one-shot.service; that's expected here
exutil.By("Check systemctl status ocp-tuned-one-shot.service, Active: inactive is expected")
stdOut, _ = oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "chroot", "/host", "systemctl", "status", "ocp-tuned-one-shot.service").Output()
o.Expect(stdOut).To(o.ContainSubstring("ocp-tuned-one-shot.service: Deactivated successfully"))
exutil.By("Check systemctl status kubelet, Active: active (running) is expected")
stdOut, err = exutil.DebugNodeRetryWithOptionsAndChroot(oc, tunedNodeName, []string{"-q"}, "systemctl", "status", "kubelet")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(stdOut).To(o.ContainSubstring("Active: active (running)"))
exutil.By("Remove NTO image on label node and delete tuned pod, the image can pull successfully")
stdOut, err = exutil.DebugNodeRetryWithOptionsAndChroot(oc, tunedNodeName, []string{"-q"}, "/bin/bash", "-c", ". /var/lib/ocp-tuned/image.env;podman rmi $NTO_IMAGE --force")
e2e.Logf("removed NTO image is %v", stdOut)
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", ntoNamespace, "pod", tunedPodName).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
//Get the tuned pod name in the same node that labeled node again
tunedPodName = getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
exutil.AssertPodToBeReady(oc, tunedPodName, ntoNamespace)
podDescOutput, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args("-n", ntoNamespace, "pod", tunedPodName).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(podDescOutput).To(o.ContainSubstring("Successfully pulled image"))
})
g.It("Author:sahshah-Longduration-NonPreRelease-Medium-76674-NTO deferred feature with annotation deferred -never[Disruptive]", func() {
isSNO := exutil.IsSNOCluster(oc)
if !isNTO || isSNO {
g.Skip("NTO is not installed or is Single Node Cluster- skipping test ...")
}
machinesetName := getTotalLinuxMachinesetNum(oc)
e2e.Logf("len(machinesetName) is %v", machinesetName)
if machinesetName > 1 {
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
} else {
tunedNodeName = choseOneWorkerNodeNotByMachineset(oc, 0)
}
labeledNode := exutil.GetNodeListByLabel(oc, "deferred-never")
if len(labeledNode) == 0 {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "deferred-never-").Execute()
}
defer func() {
oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "deferred-never-profile", "-n", ntoNamespace, "--ignore-not-found").Execute()
compareSpecifiedValueByNameOnLabelNodewithRetry(oc, ntoNamespace, tunedNodeName, "kernel.shmmni", "4096")
}()
//Get the tuned pod name in the same node that labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
o.Expect(tunedPodName).NotTo(o.BeEmpty())
exutil.By("Pickup one worker nodes to label node to deferred-never ...")
if len(labeledNode) == 0 {
oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "deferred-never=").Execute()
}
defferedNTORes := ntoResource{
name: "deferred-never-profile",
namespace: ntoNamespace,
template: ntoDefered,
sysctlparm: "kernel.shmmni",
sysctlvalue: "8192",
label: "deferred-never",
deferedValue: "never",
}
exutil.By("Compare if the value kernel.shmmni in on labeled node, should be 4096")
compareSpecifiedValueByNameOnLabelNodewithRetry(oc, ntoNamespace, tunedNodeName, "kernel.shmmni", "4096")
exutil.By("Create deferred-never profile")
defferedNTORes.applyNTOTunedProfileWithDeferredAnnotation(oc)
exutil.By("Create deferred-never profile and apply it to nodes")
defferedNTORes.assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "deferred-never-profile", "True")
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profile.tuned.openshift.io", tunedNodeName, `-ojsonpath='{.status.conditions[0].message}'`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).NotTo(o.BeEmpty())
o.Expect(output).To(o.ContainSubstring("TuneD profile applied"))
exutil.By("Compare if the value kernel.shmmni in on labeled node, should be 8192")
compareSpecifiedValueByNameOnLabelNodewithRetry(oc, ntoNamespace, tunedNodeName, "kernel.shmmni", "8192")
})
})
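The assertions above lean on compareSpecifiedValueByNameOnLabelNodewithRetry to re-read a sysctl on the labeled node until it reaches the expected value. A minimal sketch of how such a retry helper could look, assuming the exutil debug-node helper already used in this file; the function name, interval and timeout are illustrative and not the repo's actual implementation:

import (
	"strings"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)

// pollSysctlOnNode re-reads a sysctl on the given node until it matches the
// expected value or the timeout expires (hypothetical helper, sketch only).
func pollSysctlOnNode(oc *exutil.CLI, nodeName, param, expected string) error {
	return wait.Poll(10*time.Second, 3*time.Minute, func() (bool, error) {
		out, err := exutil.DebugNodeRetryWithOptionsAndChroot(oc, nodeName, []string{"-q"}, "sysctl", "-n", param)
		if err != nil {
			// Tolerate transient debug-pod failures and keep polling.
			return false, nil
		}
		return strings.TrimSpace(out) == expected, nil
	})
}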
|
package nto
| ||||
test case
|
openshift/openshift-tests-private
|
380a24c8-f468-49b2-a653-646665953f0d
|
ROSA-OSD_CCS-NonHyperShiftHOST-Author:liqcui-Medium-29789-Sysctl parameters that set by tuned can be overwritten by parameters set via /etc/sysctl [Flaky]
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("ROSA-OSD_CCS-NonHyperShiftHOST-Author:liqcui-Medium-29789-Sysctl parameters that set by tuned can be overwritten by parameters set via /etc/sysctl [Flaky]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
exutil.By("Pick one worker node and one tuned pod on same node")
workerNodeName, err := exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(workerNodeName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Worker Node: %v", workerNodeName)
tunedPodName, err := exutil.GetPodName(oc, ntoNamespace, "openshift-app=tuned", workerNodeName)
o.Expect(tunedPodName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Tuned Pod: %v", tunedPodName)
exutil.By("Check values set by /etc/sysctl on node and store the values")
inotify, _, err := exutil.DebugNodeWithOptionsAndChrootWithoutRecoverNsLabel(oc, workerNodeName, []string{"-q"}, "cat", "/etc/sysctl.d/inotify.conf")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(inotify).To(o.And(
o.ContainSubstring("fs.inotify.max_user_watches"),
o.ContainSubstring("fs.inotify.max_user_instances")))
maxUserWatchesValue := getMaxUserWatchesValue(inotify)
maxUserInstancesValue := getMaxUserInstancesValue(inotify)
e2e.Logf("fs.inotify.max_user_watches has value of: %v", maxUserWatchesValue)
e2e.Logf("fs.inotify.max_user_instances has value of: %v", maxUserInstancesValue)
exutil.By("Mount /etc/sysctl on node")
_, err = exutil.RemoteShPod(oc, ntoNamespace, tunedPodName, "mount")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check sysctl kernel.pid_max on node and store the value")
kernel, _, err := exutil.DebugNodeWithOptionsAndChrootWithoutRecoverNsLabel(oc, workerNodeName, []string{"-q"}, "sysctl", "kernel.pid_max")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(kernel).To(o.ContainSubstring("kernel.pid_max"))
pidMaxValue := getKernelPidMaxValue(kernel)
e2e.Logf("kernel.pid_max has value of: %v", pidMaxValue)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", ntoNamespace, "tuneds.tuned.openshift.io", "override").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", workerNodeName, "tuned.openshift.io/override-").Execute()
//tuned cannot override parameters set via /etc/sysctl{.conf,.d} when reapply_sysctl=true
// The settings in /etc/sysctl.d/inotify.conf are as below
// fs.inotify.max_user_watches = 65536 =>tuned tries to override it to 163840, expect the old value 65536
// fs.inotify.max_user_instances = 8192 =>not overridden by tuned, expect the old value 8192
// kernel.pid_max = 4194304 =>default value is 4194304
// The settings in the custom tuned profile are as below
// fs.inotify.max_user_watches = 163840 =>tuned tries to override it to 163840, expect the old value 65536
// kernel.pid_max = 1048576 =>overridden by tuned, expect the new value 1048576
exutil.By("Create new NTO CR with reapply_sysctl=true and label the node")
//reapply_sysctl=true: tuned cannot override parameters set via /etc/sysctl{.conf,.d}
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", workerNodeName, "tuned.openshift.io/override=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.ApplyNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", overrideFile, "REAPPLY_SYSCTL=true")
exutil.By("Check if new NTO profile was applied")
assertIfTunedProfileApplied(oc, ntoNamespace, workerNodeName, "override")
exutil.By("Check value of fs.inotify.max_user_instances on node (set by sysctl, should be the same as before), expected value is 8192")
maxUserInstanceCheck, _, err := exutil.DebugNodeWithOptionsAndChrootWithoutRecoverNsLabel(oc, workerNodeName, []string{"-q"}, "sysctl", "fs.inotify.max_user_instances")
e2e.Logf("fs.inotify.max_user_instances has value of: %v", maxUserInstanceCheck)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(maxUserInstanceCheck).To(o.ContainSubstring(maxUserInstancesValue))
exutil.By("Check value of fs.inotify.max_user_watches on node (set by sysctl, should be the same as before),expected value is 65536")
maxUserWatchesCheck, _, err := exutil.DebugNodeWithOptionsAndChrootWithoutRecoverNsLabel(oc, workerNodeName, []string{"-q"}, "sysctl", "fs.inotify.max_user_watches")
e2e.Logf("fs.inotify.max_user_watches has value of: %v", maxUserWatchesCheck)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(maxUserWatchesCheck).To(o.ContainSubstring(maxUserWatchesValue))
exutil.By("Check value of kernel.pid_max on node (set by override tuned, should be the same value of override custom profile), expected value is 1048576")
pidMaxCheck, _, err := exutil.DebugNodeWithOptionsAndChrootWithoutRecoverNsLabel(oc, workerNodeName, []string{"-q"}, "sysctl", "kernel.pid_max")
e2e.Logf("kernel.pid_max has value of: %v", pidMaxCheck)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(pidMaxCheck).To(o.ContainSubstring("kernel.pid_max = 1048576"))
//tuned can override parameters set via /etc/sysctl{.conf,.d} when reapply_sysctl=false
// The settings in /etc/sysctl.d/inotify.conf are as below
// fs.inotify.max_user_watches = 65536 =>tuned tries to override it to 163840, expect the new value 163840
// fs.inotify.max_user_instances = 8192 =>not overridden by tuned, expect the old value 8192
// kernel.pid_max = 4194304 =>default value is 4194304
// The settings in the custom tuned profile are as below
// fs.inotify.max_user_watches = 163840 =>overridden by tuned, expect the new value 163840
// kernel.pid_max = 1048576 =>overridden by tuned, expect the new value 1048576
exutil.By("Create new CR with reapply_sysctl=false")
//reapply_sysctl=false: tuned can override parameters set via /etc/sysctl{.conf,.d}
exutil.ApplyNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", overrideFile, "REAPPLY_SYSCTL=false")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check value of fs.inotify.max_user_instances on node (set by sysctl, should be the same as before),expected value is 8192")
maxUserInstanceCheck, _, err = exutil.DebugNodeWithOptionsAndChrootWithoutRecoverNsLabel(oc, workerNodeName, []string{"-q"}, "sysctl", "fs.inotify.max_user_instances")
e2e.Logf("fs.inotify.max_user_instances has value of: %v", maxUserInstanceCheck)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(maxUserInstanceCheck).To(o.ContainSubstring(maxUserInstanceCheck))
exutil.By("Check value of fs.inotify.max_user_watches on node (set by sysctl, should be the same value of override custom profile), expected value is 163840")
maxUserWatchesCheck, _, err = exutil.DebugNodeWithOptionsAndChrootWithoutRecoverNsLabel(oc, workerNodeName, []string{"-q"}, "sysctl", "fs.inotify.max_user_watches")
e2e.Logf("fs.inotify.max_user_watches has value of: %v", maxUserWatchesCheck)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(maxUserWatchesCheck).To(o.ContainSubstring("fs.inotify.max_user_watches = 163840"))
exutil.By("Check value of kernel.pid_max on node (set by override tuned, should be the same value of override custom profile), expected value is 1048576")
pidMaxCheck, _, err = exutil.DebugNodeWithOptionsAndChrootWithoutRecoverNsLabel(oc, workerNodeName, []string{"-q"}, "sysctl", "kernel.pid_max")
e2e.Logf("kernel.pid_max has value of: %v", pidMaxCheck)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(pidMaxCheck).To(o.ContainSubstring("kernel.pid_max = 1048576"))
})
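getMaxUserWatchesValue, getMaxUserInstancesValue and getKernelPidMaxValue are not shown in this excerpt; a plausible sketch of the value extraction they perform on "key = value" sysctl output (illustrative only, not the repo's actual helpers):

import (
	"regexp"
	"strings"
)

// extractSysctlValue returns the numeric right-hand side of a "key = value"
// line in sysctl output, or "" when the key is absent (sketch only).
func extractSysctlValue(output, key string) string {
	re := regexp.MustCompile(regexp.QuoteMeta(key) + `\s*=\s*(\d+)`)
	for _, line := range strings.Split(output, "\n") {
		if m := re.FindStringSubmatch(line); m != nil {
			return m[1]
		}
	}
	return ""
}

For the inotify.conf content shown above, extractSysctlValue(inotify, "fs.inotify.max_user_watches") would return "65536".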
| ||||||
test case
|
openshift/openshift-tests-private
|
c0b2f416-5507-45ed-81c4-901532270a50
|
ROSA-OSD_CCS-NonHyperShiftHOST-Author:liqcui-Medium-33237-Test NTO support for operatorapi Unmanaged state [Flaky]
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("ROSA-OSD_CCS-NonHyperShiftHOST-Author:liqcui-Medium-33237-Test NTO support for operatorapi Unmanaged state [Flaky]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
defer func() {
exutil.By("Remove custom profile (if not already removed) and patch default tuned back to Managed")
_ = oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", ntoNamespace, "tuned", "nf-conntrack-max", "--ignore-not-found").Execute()
_ = patchTunedState(oc, ntoNamespace, "default", "Managed")
}()
isSNO := exutil.IsSNOCluster(oc)
is3Master := exutil.Is3MasterNoDedicatedWorkerNode(oc)
var profileCheck string
masterNodeName := getFirstMasterNodeName(oc)
defaultMasterProfileName := getDefaultProfileNameOnMaster(oc, masterNodeName)
exutil.By("Create logging namespace")
oc.SetupProject()
loggingNamespace := oc.Namespace()
exutil.By("Patch default tuned to 'Unmanaged'")
err := patchTunedState(oc, ntoNamespace, "default", "Unmanaged")
o.Expect(err).NotTo(o.HaveOccurred())
state, err := getTunedState(oc, ntoNamespace, "default")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(state).To(o.Equal("Unmanaged"))
exutil.By("Create new pod from CR and label it")
exutil.CreateNsResourceFromTemplate(oc, loggingNamespace, "--ignore-unknown-parameters=true", "-f", podTestFile)
err = exutil.LabelPod(oc, loggingNamespace, "web", "tuned.openshift.io/elasticsearch=")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Wait for pod web is ready")
exutil.AssertPodToBeReady(oc, "web", loggingNamespace)
exutil.By("Get the tuned node and pod names")
tunedNodeName, err := exutil.GetPodNodeName(oc, loggingNamespace, "web")
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Tuned Node: %v", tunedNodeName)
tunedPodName, err := exutil.GetPodName(oc, ntoNamespace, "openshift-app=tuned", tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Tuned Pod: %v", tunedPodName)
exutil.By("Create new profile from CR")
exutil.CreateNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", tunedNFConntrackMaxFile)
exutil.By("All node's current profile is:")
stdOut, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Profile Name Per Nodes: %v", stdOut)
logsCheck, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", ntoNamespace, "--tail=9", tunedPodName).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(logsCheck).NotTo(o.ContainSubstring("nf-conntrack-max"))
if isSNO || is3Master {
profileCheck, err = getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profileCheck).To(o.Equal(defaultMasterProfileName))
} else {
profileCheck, err = getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profileCheck).To(o.Equal("openshift-node"))
}
nodeList, err := exutil.GetAllNodesbyOSType(oc, "linux")
o.Expect(err).NotTo(o.HaveOccurred())
nodeListSize := len(nodeList)
for i := 0; i < nodeListSize; i++ {
output, err := exutil.DebugNodeWithChroot(oc, nodeList[i], "sysctl", "net.netfilter.nf_conntrack_max")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("net.netfilter.nf_conntrack_max = 1048576"))
}
exutil.By("Remove custom profile and pod and patch default tuned back to Managed")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", ntoNamespace, "tuned", "nf-conntrack-max").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", loggingNamespace, "pod", "web").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = patchTunedState(oc, ntoNamespace, "default", "Managed")
o.Expect(err).NotTo(o.HaveOccurred())
state, err = getTunedState(oc, ntoNamespace, "default")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(state).To(o.Equal("Managed"))
exutil.By("Create new pod from CR and label it")
exutil.CreateNsResourceFromTemplate(oc, loggingNamespace, "--ignore-unknown-parameters=true", "-f", podTestFile)
err = exutil.LabelPod(oc, loggingNamespace, "web", "tuned.openshift.io/elasticsearch=")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Get the tuned node and pod names")
tunedNodeName, err = exutil.GetPodNodeName(oc, loggingNamespace, "web")
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Tuned Node: %v", tunedNodeName)
tunedPodName, err = exutil.GetPodName(oc, ntoNamespace, "openshift-app=tuned", tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Tuned Pod: %v", tunedPodName)
exutil.By("Create new profile from CR")
exutil.CreateNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", tunedNFConntrackMaxFile)
exutil.By("All node's current profile is:")
stdOut, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Profile Name Per Nodes: %v", stdOut)
exutil.By("Assert nf-conntrack-max applied to the node that web application run on it.")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "nf-conntrack-max")
profileCheck, err = getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profileCheck).To(o.Equal("nf-conntrack-max"))
exutil.By("All node's current profile is:")
stdOut, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Profile Name Per Nodes: %v", stdOut)
// tuned nodes should have value of 1048578, others should be 1048576
for i := 0; i < nodeListSize; i++ {
output, err := exutil.DebugNodeWithChroot(oc, nodeList[i], "sysctl", "net.netfilter.nf_conntrack_max")
o.Expect(err).NotTo(o.HaveOccurred())
if nodeList[i] == tunedNodeName {
o.Expect(output).To(o.ContainSubstring("net.netfilter.nf_conntrack_max = 1048578"))
} else {
o.Expect(output).To(o.ContainSubstring("net.netfilter.nf_conntrack_max = 1048576"))
}
}
exutil.By("Change tuned state back to Unmanaged and delete custom tuned")
err = patchTunedState(oc, ntoNamespace, "default", "Unmanaged")
o.Expect(err).NotTo(o.HaveOccurred())
state, err = getTunedState(oc, ntoNamespace, "default")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(state).To(o.Equal("Unmanaged"))
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", ntoNamespace, "tuned", "nf-conntrack-max").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
profileCheck, err = getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profileCheck).To(o.Equal("nf-conntrack-max"))
exutil.By("Assert the log contains recommended profile (nf-conntrack-max) matches current configuratio ")
assertNTOPodLogsLastLines(oc, ntoNamespace, tunedPodName, "20", 180, `recommended profile \(nf-conntrack-max\) matches current configuration|static tuning from profile 'nf-conntrack-max' applied`)
exutil.By("All node's current profile is:")
stdOut, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Profile Name Per Nodes: %v", stdOut)
// tuned nodes should have value of 1048578, others should be 1048576
for i := 0; i < nodeListSize; i++ {
output, err := exutil.DebugNodeWithChroot(oc, nodeList[i], "sysctl", "net.netfilter.nf_conntrack_max")
o.Expect(err).NotTo(o.HaveOccurred())
if nodeList[i] == tunedNodeName {
o.Expect(output).To(o.ContainSubstring("net.netfilter.nf_conntrack_max = 1048578"))
} else {
o.Expect(output).To(o.ContainSubstring("net.netfilter.nf_conntrack_max = 1048576"))
}
}
exutil.By("Changed tuned state back to Managed")
err = patchTunedState(oc, ntoNamespace, "default", "Managed")
o.Expect(err).NotTo(o.HaveOccurred())
state, err = getTunedState(oc, ntoNamespace, "default")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(state).To(o.Equal("Managed"))
if isSNO || is3Master {
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, defaultMasterProfileName)
profileCheck, err = getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profileCheck).To(o.Equal(defaultMasterProfileName))
} else {
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "openshift-node")
profileCheck, err = getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profileCheck).To(o.Equal("openshift-node"))
}
exutil.By("All node's current profile is:")
stdOut, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Profile Name Per Nodes: %v", stdOut)
for i := 0; i < nodeListSize; i++ {
output, err := exutil.DebugNodeWithChroot(oc, nodeList[i], "sysctl", "net.netfilter.nf_conntrack_max")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring("net.netfilter.nf_conntrack_max = 1048576"))
}
})
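patchTunedState and getTunedState are used throughout this test but defined elsewhere; a hedged sketch of how they could be built on the oc wrapper used in these tests, assuming the default Tuned CR exposes the operator state under .spec.managementState (that field path is an assumption of this sketch, as are the function names):

import (
	"fmt"

	exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)

// patchTunedStateSketch sets the management state of a Tuned CR via a merge
// patch (sketch only; .spec.managementState is assumed, not verified here).
func patchTunedStateSketch(oc *exutil.CLI, namespace, name, state string) error {
	patch := fmt.Sprintf(`{"spec":{"managementState":"%s"}}`, state)
	return oc.AsAdmin().WithoutNamespace().Run("patch").
		Args("tuned", name, "-n", namespace, "--type", "merge", "-p", patch).Execute()
}

// getTunedStateSketch reads the same field back (sketch only).
func getTunedStateSketch(oc *exutil.CLI, namespace, name string) (string, error) {
	return oc.AsAdmin().WithoutNamespace().Run("get").
		Args("tuned", name, "-n", namespace, "-ojsonpath={.spec.managementState}").Output()
}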
| ||||||
test case
|
openshift/openshift-tests-private
|
b904a423-8c50-4521-8779-e54a93d8d521
|
Longduration-NonPreRelease-Author:liqcui-Medium-36881-Node Tuning Operator will provide machine config for the master machine config pool [Disruptive] [Slow]
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("Longduration-NonPreRelease-Author:liqcui-Medium-36881-Node Tuning Operator will provide machine config for the master machine config pool [Disruptive] [Slow]", func() {
// test requires NTO to be installed
isSNO := exutil.IsSNOCluster(oc)
isOneMasterwithNWorker := exutil.IsOneMasterWithNWorkerNodes(oc)
if !isNTO || isSNO || isOneMasterwithNWorker {
g.Skip("NTO is not installed or is Single Node Cluster- skipping test ...")
}
if ManualPickup {
g.Skip("This is the test case that execute mannually in shared cluster ...")
}
defer func() {
exutil.By("Remove new tuning profile after test completion")
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", ntoNamespace, "tuneds.tuned.openshift.io", "openshift-node-performance-hp-performanceprofile").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}()
exutil.By("Add new tuning profile from CR")
exutil.CreateNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", hPPerformanceProfileFile)
exutil.By("Verify new tuned profile was created")
profiles, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("tuned", "-n", ntoNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profiles).To(o.ContainSubstring("openshift-node-performance-hp-performanceprofile"))
exutil.By("Get NTO pod name and check logs for priority warning")
ntoPodName, err := getNTOPodName(oc, ntoNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("NTO pod name: %v", ntoPodName)
//ntoPodLogs, err := exutil.GetSpecificPodLogs(oc, ntoNamespace, "", ntoPodName, "")
assertNTOPodLogsLastLines(oc, ntoNamespace, ntoPodName, "10", 180, `openshift-node-performance-hp-performanceprofile have the same priority 30.*please use a different priority for your custom profiles`)
//o.Expect(err).NotTo(o.HaveOccurred())
//o.Expect(ntoPodLogs).To(o.ContainSubstring("profiles openshift-control-plane/openshift-node-performance-hp-performanceprofile have the same priority 30, please use a different priority for your custom profiles!"))
exutil.By("Patch priority for openshift-node-performance-hp-performanceprofile tuned to 18")
err = patchTunedProfile(oc, ntoNamespace, "openshift-node-performance-hp-performanceprofile", hpPerformanceProfilePatchFile)
o.Expect(err).NotTo(o.HaveOccurred())
tunedPriority, err := getTunedPriority(oc, ntoNamespace, "openshift-node-performance-hp-performanceprofile")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedPriority).To(o.Equal("18"))
exutil.By("Check Nodes for expected changes")
masterNodeName := assertIfNodeSchedulingDisabled(oc)
e2e.Logf("The master node %v has been rebooted", masterNodeName)
exutil.By("Check MachineConfigPool for expected changes")
exutil.AssertIfMCPChangesAppliedByName(oc, "master", 1800)
exutil.By("Ensure the settings took effect on the master nodes, only check the first rebooted nodes")
assertIfMasterNodeChangesApplied(oc, masterNodeName)
exutil.By("Check MachineConfig kernel arguments for expected changes")
mcCheck, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("mc").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(mcCheck).To(o.ContainSubstring("50-nto-master"))
mcKernelArgCheck, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args("mc/50-nto-master").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(mcKernelArgCheck).To(o.ContainSubstring("default_hugepagesz=2M"))
})
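exutil.AssertIfMCPChangesAppliedByName waits for the master pool to settle after the kernel-argument change; conceptually it reduces to polling the MachineConfigPool's Updated condition, roughly as in this illustrative sketch (the real helper's retries and condition checks may differ):

import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)

// waitForMCPUpdatedSketch polls a MachineConfigPool until its Updated
// condition reports True (illustrative sketch, not the repo helper).
func waitForMCPUpdatedSketch(oc *exutil.CLI, mcpName string, timeout time.Duration) error {
	return wait.Poll(30*time.Second, timeout, func() (bool, error) {
		status, err := oc.AsAdmin().WithoutNamespace().Run("get").
			Args("mcp", mcpName, `-ojsonpath={.status.conditions[?(@.type=="Updated")].status}`).Output()
		if err != nil {
			return false, nil // keep polling through transient API errors
		}
		return status == "True", nil
	})
}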
| ||||||
test case
|
openshift/openshift-tests-private
|
ec8b85dd-d84b-44be-9881-c8a6040f4cd5
|
ROSA-OSD_CCS-NonHyperShiftHOST-Author:liqcui-Medium-23959-Test NTO for remove pod in daemon mode [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("ROSA-OSD_CCS-NonHyperShiftHOST-Author:liqcui-Medium-23959-Test NTO for remove pod in daemon mode [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
ntoRes := ntoResource{
name: "kernel-pid-max",
namespace: ntoNamespace,
template: customTunedProfile,
sysctlparm: "kernel.pid_max",
sysctlvalue: "128888",
}
defer func() {
exutil.By("Remove custom profile (if not already removed) and patch default tuned back to Managed")
ntoRes.delete(oc)
_ = patchTunedState(oc, ntoNamespace, "default", "Managed")
}()
isSNO := exutil.IsSNOCluster(oc)
if !isSNO {
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
} else {
tunedNodeName, err = exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
}
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
defer func() {
exutil.By("Forcily delete labeled pod on first worker node after test case executed in case compareSysctlDifferentFromSpecifiedValueByName step failure")
oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", tunedPodName, "-n", ntoNamespace, "--ignore-not-found").Execute()
}()
exutil.By("Apply new profile from CR")
ntoRes.createTunedProfileIfNotExist(oc)
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check all nodes for kernel.pid_max value, all node should different from 128888")
compareSysctlDifferentFromSpecifiedValueByName(oc, "kernel.pid_max", "128888")
exutil.By("Label tuned pod as tuned.openshift.io/elasticsearch=")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("pod", tunedPodName, "-n", ntoNamespace, "tuned.openshift.io/elasticsearch=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check if customized tuned profile applied on target node")
ntoRes.assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "kernel-pid-max", "True")
exutil.By("Compare if the value kernel.pid_max in on node with labeled pod, should be 128888")
compareSysctlValueOnSepcifiedNodeByName(oc, tunedNodeName, "kernel.pid_max", "", "128888")
exutil.By("Delete labeled tuned pod by name")
oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", tunedPodName, "-n", ntoNamespace).Execute()
exutil.By("Check all nodes for kernel.pid_max value, all node should different from 128888")
compareSysctlDifferentFromSpecifiedValueByName(oc, "kernel.pid_max", "128888")
})
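The customTunedProfile template behind ntoRes is not part of this excerpt. For orientation, the rendered Tuned CR for the kernel-pid-max case most likely resembles the YAML below; the summary, include line and priority are illustrative guesses, and only the profile name, sysctl parameter/value and the pod-label match are taken from the test above:

// Sketch of the Tuned CR the template is assumed to render (values partly illustrative).
const kernelPidMaxTunedSketch = `
apiVersion: tuned.openshift.io/v1
kind: Tuned
metadata:
  name: kernel-pid-max
  namespace: openshift-cluster-node-tuning-operator
spec:
  profile:
  - name: kernel-pid-max
    data: |
      [main]
      summary=Custom profile that sets kernel.pid_max
      include=openshift-node
      [sysctl]
      kernel.pid_max=128888
  recommend:
  - profile: kernel-pid-max
    priority: 20
    match:
    - label: tuned.openshift.io/elasticsearch
      type: pod
`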
| ||||||
test case
|
openshift/openshift-tests-private
|
2aa2c77b-881d-44e1-b5f8-11928e50399a
|
ROSA-OSD_CCS-NonHyperShiftHOST-Author:liqcui-Medium-23958-Test NTO for label pod in daemon mode [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("ROSA-OSD_CCS-NonHyperShiftHOST-Author:liqcui-Medium-23958-Test NTO for label pod in daemon mode [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
ntoRes := ntoResource{
name: "user-max-ipc-namespaces",
namespace: ntoNamespace,
template: customTunedProfile,
sysctlparm: "user.max_ipc_namespaces",
sysctlvalue: "121112",
}
defer func() {
exutil.By("Remove custom profile (if not already removed) and patch default tuned back to Managed")
ntoRes.delete(oc)
}()
isSNO := exutil.IsSNOCluster(oc)
//Prefer to choose a worker node managed by a machineset
if !isSNO {
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
} else {
tunedNodeName, err = exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
}
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
defer func() {
exutil.By("Forcily remove label from the pod on first worker node in case compareSysctlDifferentFromSpecifiedValueByName step failure")
err = exutil.LabelPod(oc, ntoNamespace, tunedPodName, "tuned.openshift.io/elasticsearch-")
}()
exutil.By("Apply new profile from CR")
ntoRes.createTunedProfileIfNotExist(oc)
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check all nodes for user.max_ipc_namespaces value, all node should different from 121112")
compareSysctlDifferentFromSpecifiedValueByName(oc, "user.max_ipc_namespaces", "121112")
exutil.By("Label tuned pod as tuned.openshift.io/elasticsearch=")
err = exutil.LabelPod(oc, ntoNamespace, tunedPodName, "tuned.openshift.io/elasticsearch=")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check current profile for each node")
ntoRes.assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "user-max-ipc-namespaces", "True")
exutil.By("Compare if the value user.max_ipc_namespaces in on node with labeled pod, should be 121112")
compareSysctlValueOnSepcifiedNodeByName(oc, tunedNodeName, "user.max_ipc_namespaces", "", "121112")
exutil.By("Remove label from tuned pod as tuned.openshift.io/elasticsearch-")
err = exutil.LabelPod(oc, ntoNamespace, tunedPodName, "tuned.openshift.io/elasticsearch-")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check all nodes for user.max_ipc_namespaces value, all node should different from 121112")
compareSysctlDifferentFromSpecifiedValueByName(oc, "user.max_ipc_namespaces", "121112")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
685ce689-2d39-4c49-a2d5-2bf732bc6173
|
ROSA-OSD_CCS-NonHyperShiftHOST-Author:liqcui-Medium-43173-NTO Cgroup Blacklist Pod should affine to default cpuset.[Disruptive]
|
['"regexp"', '"strconv"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("ROSA-OSD_CCS-NonHyperShiftHOST-Author:liqcui-Medium-43173-NTO Cgroup Blacklist Pod should affine to default cpuset.[Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
isSNO := exutil.IsSNOCluster(oc)
//Prefer to choose a worker node managed by a machineset
if !isSNO {
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
} else {
tunedNodeName, err = exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
}
//Get the number of CPUs on the specified worker node
exutil.By("Get how many CPU cores the labeled worker node has")
nodeCPUCores, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.status.capacity.cpu}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeCPUCores).NotTo(o.BeEmpty())
nodeCPUCoresInt, err := strconv.Atoi(nodeCPUCores)
o.Expect(err).NotTo(o.HaveOccurred())
if nodeCPUCoresInt <= 1 {
g.Skip("the worker node don't have enough cpus - skipping test ...")
}
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
o.Expect(tunedPodName).NotTo(o.BeEmpty())
exutil.By("Remove custom profile (if not already removed) and remove node label")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "-n", ntoNamespace, "cgroup-scheduler-affinecpuset").Execute()
defer func() {
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "tuned-scheduler-node-").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}()
exutil.By("Label the specified linux node with label tuned-scheduler-node")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "tuned-scheduler-node=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// setting cgroup_ps_blacklist=/kubepods\.slice/
// processes that belong to /kubepods\.slice/ can consume the whole cpuset
// their expected Cpus_allowed_list in /proc/$PID/status should be 0-N
// processes that don't belong to /kubepods\.slice/ cannot consume the whole cpuset
// their expected Cpus_allowed_list in /proc/$PID/status should be 0 or 0,2-N
exutil.By("Create NTO custom tuned profile cgroup-scheduler-affinecpuset")
exutil.ApplyNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", cgroupSchedulerBacklist, "-p", "PROFILE_NAME=cgroup-scheduler-affinecpuset", `CGROUP_BLACKLIST=/kubepods\.slice/`)
exutil.By("Check if NTO custom tuned profile cgroup-scheduler-affinecpuset was applied")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "cgroup-scheduler-affinecpuset")
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
// The expected Cpus_allowed_list in /proc/$PID/status should be 0-N
exutil.By("Verified the cpu allow list in cgroup black list for tuned ...")
clusterVersion, _, err := exutil.GetClusterVersion(oc)
versionReg := regexp.MustCompile(`4.12|4.13`)
o.Expect(err).NotTo(o.HaveOccurred())
if versionReg.MatchString(clusterVersion) {
o.Expect(assertProcessInCgroupSchedulerBlacklist(oc, tunedNodeName, ntoNamespace, "openshift-tuned", nodeCPUCoresInt)).To(o.Equal(true))
} else {
o.Expect(assertProcessInCgroupSchedulerBlacklist(oc, tunedNodeName, ntoNamespace, "tuned", nodeCPUCoresInt)).To(o.Equal(true))
}
// The expected Cpus_allowed_list in /proc/$PID/status should be 0 or 0,2-N
exutil.By("Verified the cpu allow list in cgroup black list for chronyd ...")
o.Expect(assertProcessNOTInCgroupSchedulerBlacklist(oc, tunedNodeName, ntoNamespace, "chronyd", nodeCPUCoresInt)).To(o.Equal(true))
})
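assertProcessInCgroupSchedulerBlacklist is not shown in this excerpt; its core check presumably parses Cpus_allowed_list from /proc/$PID/status and decides whether the mask spans every core. A self-contained sketch of that parsing (the helper name is hypothetical):

import (
	"strconv"
	"strings"
)

// coversAllCPUs parses a Cpus_allowed_list value such as "0-7" or "0,2-7" and
// reports whether it covers every core 0..nodeCPUCores-1 (sketch only).
func coversAllCPUs(cpusAllowedList string, nodeCPUCores int) bool {
	seen := make(map[int]bool)
	for _, part := range strings.Split(strings.TrimSpace(cpusAllowedList), ",") {
		if bounds := strings.SplitN(part, "-", 2); len(bounds) == 2 {
			lo, err1 := strconv.Atoi(bounds[0])
			hi, err2 := strconv.Atoi(bounds[1])
			if err1 != nil || err2 != nil {
				return false
			}
			for i := lo; i <= hi; i++ {
				seen[i] = true
			}
		} else if n, err := strconv.Atoi(part); err == nil {
			seen[n] = true
		}
	}
	for i := 0; i < nodeCPUCores; i++ {
		if !seen[i] {
			return false
		}
	}
	return true
}

For an 8-core node, coversAllCPUs("0-7", 8) is true while coversAllCPUs("0,2-7", 8) is false, matching the blacklisted/non-blacklisted expectations in the comments above.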
| |||||
test case
|
openshift/openshift-tests-private
|
052f35f7-ef56-4f84-ad18-c98f05443ab3
|
ROSA-OSD_CCS-NonHyperShiftHOST-Author:liqcui-Medium-27491-Add own custom profile to tuned operator [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("ROSA-OSD_CCS-NonHyperShiftHOST-Author:liqcui-Medium-27491-Add own custom profile to tuned operator [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
ntoRes := ntoResource{
name: "user-max-mnt-namespaces",
namespace: ntoNamespace,
template: customTunedProfile,
sysctlparm: "user.max_mnt_namespaces",
sysctlvalue: "142214",
}
masterNodeName := getFirstMasterNodeName(oc)
defaultMasterProfileName := getDefaultProfileNameOnMaster(oc, masterNodeName)
oc.SetupProject()
ntoTestNS := oc.Namespace()
is3CPNoWorker := exutil.Is3MasterNoDedicatedWorkerNode(oc)
//Clean up the custom profile user-max-mnt-namespaces and unlabel the nginx pod
defer ntoRes.delete(oc)
//First choice is to use the [tests] image, which is mirrored by default in disconnected clusters
//if the [tests] image is not available in some environments, we can use hello-openshift as the image
//usually the tests imagestream ships in all OCP releases and the image is mirrored in disconnected clusters by default
// AppImageName := exutil.GetImagestreamImageName(oc, "tests")
// if len(AppImageName) == 0 {
AppImageName := "quay.io/openshifttest/nginx-alpine@sha256:04f316442d48ba60e3ea0b5a67eb89b0b667abf1c198a3d0056ca748736336a0"
// }
//Create a nginx web application pod
exutil.By("Create a nginx web pod in nto temp namespace")
exutil.ApplyNsResourceFromTemplate(oc, ntoTestNS, "--ignore-unknown-parameters=true", "-f", podShippedFile, "-p", "IMAGENAME="+AppImageName)
//Check if nginx pod is ready
exutil.AssertPodToBeReady(oc, "nginx", ntoTestNS)
//Get the node name in the same node as nginx app
tunedNodeName, err := exutil.GetPodNodeName(oc, ntoTestNS, "nginx")
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("tunedNodeName is %v", tunedNodeName)
//Get the tuned pod name in the same node as nginx app
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
//Label pod nginx with tuned.openshift.io/elasticsearch=
exutil.By("Label nginx pod as tuned.openshift.io/elasticsearch=")
err = exutil.LabelPod(oc, ntoTestNS, "nginx", "tuned.openshift.io/elasticsearch=")
o.Expect(err).NotTo(o.HaveOccurred())
//Apply new profile that match label tuned.openshift.io/elasticsearch=
exutil.By("Apply new profile from CR")
ntoRes.createTunedProfileIfNotExist(oc)
exutil.By("Check if new profile user-max-mnt-namespaces applied to labeled node")
//Verify if the new profile is applied
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "user-max-mnt-namespaces")
profileCheck, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profileCheck).To(o.Equal("user-max-mnt-namespaces"))
exutil.By("Assert static tuning from profile 'user-max-mnt-namespaces' applied in tuned pod log")
assertNTOPodLogsLastLines(oc, ntoNamespace, tunedPodName, "10", 180, `static tuning from profile 'user-max-mnt-namespaces' applied|active and recommended profile \(user-max-mnt-namespaces\) match`)
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Compare if the value user.max_mnt_namespaces in on node with labeled pod, should be 142214")
compareSysctlValueOnSepcifiedNodeByName(oc, tunedNodeName, "user.max_mnt_namespaces", "", "142214")
exutil.By("Delete custom tuned profile user.max_mnt_namespaces")
ntoRes.delete(oc)
//Check if restore to default profile.
isSNO := exutil.IsSNOCluster(oc)
if isSNO || is3CPNoWorker {
exutil.By("The cluster is SNO or Compact Cluster")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, defaultMasterProfileName)
exutil.By("Assert default profile applied in tuned pod log")
assertNTOPodLogsLastLines(oc, ntoNamespace, tunedPodName, "10", 180, "'"+defaultMasterProfileName+"' applied|("+defaultMasterProfileName+") match")
profileCheck, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profileCheck).To(o.Equal(defaultMasterProfileName))
} else {
exutil.By("The cluster is regular OCP Cluster")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "openshift-node")
exutil.By("Assert profile 'openshift-node' applied in tuned pod log")
assertNTOPodLogsLastLines(oc, ntoNamespace, tunedPodName, "10", 180, `static tuning from profile 'openshift-node' applied|active and recommended profile \(openshift-node\) match`)
profileCheck, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profileCheck).To(o.Equal("openshift-node"))
}
exutil.By("Check all nodes for user.max_mnt_namespaces value, all node should different from 142214")
compareSysctlDifferentFromSpecifiedValueByName(oc, "user.max_mnt_namespaces", "142214")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
fcbf1529-6f82-4c15-9cb0-82b59ceff70c
|
ROSA-OSD_CCS-NonHyperShiftHOST-NonPreRelease-Longduration-Author:liqcui-Medium-37125-Turning on debugging for tuned containers.[Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("ROSA-OSD_CCS-NonHyperShiftHOST-NonPreRelease-Longduration-Author:liqcui-Medium-37125-Turning on debugging for tuned containers.[Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
ntoRes := ntoResource{
name: "user-max-net-namespaces",
namespace: ntoNamespace,
template: ntoTunedDebugFile,
sysctlparm: "user.max_net_namespaces",
sysctlvalue: "101010",
}
var (
isEnableDebug bool
isDebugInLog bool
)
//Clean up the custom profile user-max-mnt-namespaces
defer ntoRes.delete(oc)
//Create a temp namespace to deploy nginx pod
oc.SetupProject()
ntoTestNS := oc.Namespace()
//First choice is to use the [tests] image, which is mirrored by default in disconnected clusters
//if the [tests] image is not available in some environments, we can use hello-openshift as the image
//usually the tests imagestream ships in all OCP releases and the image is mirrored in disconnected clusters by default
// AppImageName := exutil.GetImagestreamImageName(oc, "tests")
// if len(AppImageName) == 0 {
AppImageName := "quay.io/openshifttest/nginx-alpine@sha256:04f316442d48ba60e3ea0b5a67eb89b0b667abf1c198a3d0056ca748736336a0"
// }
//Create a nginx web application pod
exutil.By("Create a nginx web pod in nto temp namespace")
exutil.ApplyNsResourceFromTemplate(oc, ntoTestNS, "--ignore-unknown-parameters=true", "-f", podNginxFile, "-p", "IMAGENAME="+AppImageName)
//Check if nginx pod is ready
exutil.AssertPodToBeReady(oc, "nginx", ntoTestNS)
//Get the node name in the same node as nginx app
tunedNodeName, err := exutil.GetPodNodeName(oc, ntoTestNS, "nginx")
o.Expect(err).NotTo(o.HaveOccurred())
//Get the tuned pod name in the same node as nginx app
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
//To reset the tuned pod log, forcibly delete the tuned pod
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", tunedPodName, "-n", ntoNamespace, "--ignore-not-found=true").Execute()
//Label pod nginx with tuned.openshift.io/elasticsearch=
exutil.By("Label nginx pod as tuned.openshift.io/elasticsearch=")
err = exutil.LabelPod(oc, ntoTestNS, "nginx", "tuned.openshift.io/elasticsearch=")
o.Expect(err).NotTo(o.HaveOccurred())
//Verify if debug was disabled by default
exutil.By("Check node profile debug settings, it should be debug: false")
isEnableDebug = assertDebugSettings(oc, tunedNodeName, ntoNamespace, "false")
o.Expect(isEnableDebug).To(o.Equal(true))
//Apply new profile that match label tuned.openshift.io/elasticsearch=
exutil.By("Apply new profile from CR with debug setting is false")
ntoRes.createDebugTunedProfileIfNotExist(oc, false)
//Verify if the new profile is applied
ntoRes.assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "user-max-net-namespaces", "True")
profileCheck, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profileCheck).To(o.Equal("user-max-net-namespaces"))
//Verify nto tuned logs
exutil.By("Check NTO tuned pod logs to confirm if user-max-net-namespaces applied")
assertNTOPodLogsLastLines(oc, ntoNamespace, tunedPodName, "10", 180, `'user-max-net-namespaces' applied|\(user-max-net-namespaces\) match`)
//Verify if debug is false by CR setting
exutil.By("Check node profile debug settings, it should be debug: false")
isEnableDebug = assertDebugSettings(oc, tunedNodeName, ntoNamespace, "false")
o.Expect(isEnableDebug).To(o.Equal(true))
//Check if the log contains DEBUG entries, the expected result is none
exutil.By("Check if tuned pod log contains debug key word, the expected result should be no DEBUG")
isDebugInLog = exutil.AssertOprPodLogsbyFilter(oc, tunedPodName, ntoNamespace, "DEBUG", 2)
o.Expect(isDebugInLog).To(o.Equal(false))
exutil.By("Delete custom profile and will apply a new one ...")
ntoRes.delete(oc)
exutil.By("Apply new profile from CR with debug setting is true")
ntoRes.createDebugTunedProfileIfNotExist(oc, true)
//Verify if the new profile is applied
ntoRes.assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "user-max-net-namespaces", "True")
profileCheck, err = getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profileCheck).To(o.Equal("user-max-net-namespaces"))
//Verify nto tuned logs
assertNTOPodLogsLastLines(oc, ntoNamespace, tunedPodName, "10", 180, `'user-max-net-namespaces' applied|\(user-max-net-namespaces\) match`)
//Verify if debug was enabled by CR setting
exutil.By("Check if the debug is true in node profile, the expected result should be true")
isEnableDebug = assertDebugSettings(oc, tunedNodeName, ntoNamespace, "true")
o.Expect(isEnableDebug).To(o.Equal(true))
//Now the log should contain DEBUG entries since debug is enabled
exutil.By("Check if tuned pod log contains debug key word, the log should contain DEBUG")
exutil.AssertOprPodLogsbyFilterWithDuration(oc, tunedPodName, ntoNamespace, "DEBUG", 60, 2)
})
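exutil.AssertOprPodLogsbyFilter is used above to decide whether DEBUG lines reached the tuned pod log; conceptually it boils down to fetching the pod log and counting keyword occurrences, as in this illustrative sketch (the real helper also retries over a duration, which is omitted here):

import (
	"strings"

	exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)

// podLogContainsAtLeast reports whether keyword occurs at least minCount
// times in the pod's current log (sketch only, no retry logic).
func podLogContainsAtLeast(oc *exutil.CLI, namespace, podName, keyword string, minCount int) (bool, error) {
	logs, err := oc.AsAdmin().WithoutNamespace().Run("logs").
		Args("-n", namespace, podName).Output()
	if err != nil {
		return false, err
	}
	return strings.Count(logs, keyword) >= minCount, nil
}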
| ||||||
test case
|
openshift/openshift-tests-private
|
6ab0cc3c-474f-4081-bdc4-82008391d873
|
ROSA-OSD_CCS-NonHyperShiftHOST-Author:liqcui-Medium-37415-Allow setting isolated_cores without touching the default_irq_affinity [Disruptive]
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("ROSA-OSD_CCS-NonHyperShiftHOST-Author:liqcui-Medium-37415-Allow setting isolated_cores without touching the default_irq_affinity [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
isSNO := exutil.IsSNOCluster(oc)
//Prefer to choose a worker node managed by a machineset
if !isSNO {
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
} else {
tunedNodeName, err = exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
}
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "tuned.openshift.io/default-irq-smp-affinity-").Execute()
exutil.By("Label the node with default-irq-smp-affinity ")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "tuned.openshift.io/default-irq-smp-affinity=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check the default values of /proc/irq/default_smp_affinity on worker nodes")
//Replaced exutil.DebugNodeWithOptionsAndChroot with oc.AsAdmin().WithoutNamespace() because the former emits a Go warning even when --quiet=true is set
//This test case must get the value of default_smp_affinity without any warning output
defaultSMPAffinity, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "chroot", "/host", "cat", "/proc/irq/default_smp_affinity").Output()
e2e.Logf("the default value of /proc/irq/default_smp_affinity without cpu affinity is: %v", defaultSMPAffinity)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(defaultSMPAffinity).NotTo(o.BeEmpty())
defaultSMPAffinity = strings.ReplaceAll(defaultSMPAffinity, ",", "")
defaultSMPAffinityMask := getDefaultSMPAffinityBitMaskbyCPUCores(oc, tunedNodeName)
o.Expect(defaultSMPAffinity).To(o.ContainSubstring(defaultSMPAffinityMask))
e2e.Logf("the value of /proc/irq/default_smp_affinity: %v", defaultSMPAffinityMask)
cpuBitsMask := convertCPUBitMaskToByte(defaultSMPAffinityMask)
o.Expect(cpuBitsMask).NotTo(o.BeEmpty())
ntoRes1 := ntoResource{
name: "default-irq-smp-affinity",
namespace: ntoNamespace,
template: ntoIRQSMPFile,
sysctlparm: "#default_irq_smp_affinity",
sysctlvalue: "1",
}
defer ntoRes1.delete(oc)
exutil.By("Create default-irq-smp-affinity profile to enable isolated_cores=1")
ntoRes1.createIRQSMPAffinityProfileIfNotExist(oc)
exutil.By("Check if new NTO profile was applied")
ntoRes1.assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "default-irq-smp-affinity", "True")
exutil.By("Check values of /proc/irq/default_smp_affinity on worker nodes after enabling isolated_cores=1")
isolatedcoresSMPAffinity, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "chroot", "/host", "cat", "/proc/irq/default_smp_affinity").Output()
isolatedcoresSMPAffinity = strings.ReplaceAll(isolatedcoresSMPAffinity, ",", "")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(isolatedcoresSMPAffinity).NotTo(o.BeEmpty())
e2e.Logf("the value of default_smp_affinity after setting isolated_cores=1 is: %v", isolatedcoresSMPAffinity)
exutil.By("Verify if the value of /proc/irq/default_smp_affinity is affected by isolated_cores=1")
//Isolating the second CPU core (cpu 1) should change default_smp_affinity
isolatedCPU := convertIsolatedCPURange2CPUList("1")
o.Expect(isolatedCPU).NotTo(o.BeEmpty())
newSMPAffinityMask := assertIsolateCPUCoresAffectedBitMask(cpuBitsMask, isolatedCPU)
o.Expect(newSMPAffinityMask).NotTo(o.BeEmpty())
o.Expect(isolatedcoresSMPAffinity).To(o.ContainSubstring(newSMPAffinityMask))
exutil.By("Remove the old profile and create a new one later ...")
ntoRes1.delete(oc)
ntoRes2 := ntoResource{
name: "default-irq-smp-affinity",
namespace: ntoNamespace,
template: ntoIRQSMPFile,
sysctlparm: "default_irq_smp_affinity",
sysctlvalue: "1",
}
defer ntoRes2.delete(oc)
exutil.By("Create default-irq-smp-affinity profile to enable default_irq_smp_affinity=1")
ntoRes2.createIRQSMPAffinityProfileIfNotExist(oc)
exutil.By("Check if new NTO profile was applied")
ntoRes2.assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "default-irq-smp-affinity", "True")
exutil.By("Check values of /proc/irq/default_smp_affinity on worker nodes")
//We only need the value of /proc/irq/default_smp_affinity, without stderr
IRQSMPAffinity, _, err := exutil.DebugNodeRetryWithOptionsAndChrootWithStdErr(oc, tunedNodeName, []string{"--quiet=true", "--to-namespace=" + ntoNamespace}, "cat", "/proc/irq/default_smp_affinity")
IRQSMPAffinity = strings.ReplaceAll(IRQSMPAffinity, ",", "")
o.Expect(IRQSMPAffinity).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
//Isolating the second CPU core (cpu 1) should change default_smp_affinity
e2e.Logf("the value of default_smp_affinity after setting default_irq_smp_affinity=1 is: %v", IRQSMPAffinity)
isMatch := assertDefaultIRQSMPAffinityAffectedBitMask(cpuBitsMask, isolatedCPU, string(IRQSMPAffinity))
o.Expect(isMatch).To(o.Equal(true))
})
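The bitmask helpers used above (convertCPUBitMaskToByte, assertIsolateCPUCoresAffectedBitMask and friends) are defined elsewhere; the arithmetic they encapsulate can be sketched: build the full mask for N CPUs, clear the isolated core's bit, and compare against the hex string from /proc/irq/default_smp_affinity with commas stripped, as the test does. Illustrative only:

import "math/big"

// expectedMaskWithoutCPU returns the hex affinity mask for totalCPUs cores
// with the isolated core's bit cleared (sketch of the underlying arithmetic).
func expectedMaskWithoutCPU(totalCPUs, isolatedCPU int) string {
	full := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), uint(totalCPUs)), big.NewInt(1))
	full.SetBit(full, isolatedCPU, 0) // drop the isolated core from the mask
	return full.Text(16)
}

For a 4-core node with core 1 isolated this yields "d" (binary 1101), i.e. the default mask "f" with bit 1 cleared, which is the kind of value the assertions above expect once isolated_cores=1 takes effect.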
| |||||
test case
|
openshift/openshift-tests-private
|
43b53e3f-9f55-4641-ae5e-79a5cc495d06
|
ROSA-OSD_CCS-NonHyperShiftHOST-Author:liqcui-Medium-44650-NTO profiles provided with TuneD [Disruptive]
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("ROSA-OSD_CCS-NonHyperShiftHOST-Author:liqcui-Medium-44650-NTO profiles provided with TuneD [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
//Get the tuned pod name that runs on the first worker node
tunedNodeName, err := exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(err).NotTo(o.HaveOccurred())
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
exutil.By("Check kernel version of worker nodes ...")
kernelVersion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.status.nodeInfo.kernelVersion}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(kernelVersion).NotTo(o.BeEmpty())
exutil.By("Check default tuned profile list, should contain openshift-control-plane and openshift-node")
defaultTunedOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "tuned", "default", "-ojsonpath={.spec.recommend}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(defaultTunedOutput).NotTo(o.BeEmpty())
o.Expect(defaultTunedOutput).To(o.And(
o.ContainSubstring("openshift-control-plane"),
o.ContainSubstring("openshift-node")))
exutil.By("Check content of tuned file /usr/lib/tuned/openshift/tuned.conf to match default NTO settings")
openshiftTunedConf, err := exutil.RemoteShPod(oc, ntoNamespace, tunedPodName, "cat", "/usr/lib/tuned/openshift/tuned.conf")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(openshiftTunedConf).NotTo(o.BeEmpty())
if strings.Contains(kernelVersion, "el8") || strings.Contains(kernelVersion, "el7") {
o.Expect(openshiftTunedConf).To(o.And(
o.ContainSubstring("avc_cache_threshold=8192"),
o.ContainSubstring("kernel.pid_max=>4194304"),
o.ContainSubstring("net.netfilter.nf_conntrack_max=1048576"),
o.ContainSubstring("net.ipv4.conf.all.arp_announce=2"),
o.ContainSubstring("net.ipv4.neigh.default.gc_thresh1=8192"),
o.ContainSubstring("net.ipv4.neigh.default.gc_thresh2=32768"),
o.ContainSubstring("net.ipv4.neigh.default.gc_thresh3=65536"),
o.ContainSubstring("net.ipv6.neigh.default.gc_thresh1=8192"),
o.ContainSubstring("net.ipv6.neigh.default.gc_thresh2=32768"),
o.ContainSubstring("net.ipv6.neigh.default.gc_thresh3=65536"),
o.ContainSubstring("vm.max_map_count=262144"),
o.ContainSubstring("/sys/module/nvme_core/parameters/io_timeout=4294967295"),
o.ContainSubstring(`cgroup_ps_blacklist=/kubepods\.slice/`),
o.ContainSubstring("runtime=0")))
} else {
o.Expect(openshiftTunedConf).To(o.And(
o.ContainSubstring("avc_cache_threshold=8192"),
o.ContainSubstring("nf_conntrack_hashsize=1048576"),
o.ContainSubstring("kernel.pid_max=>4194304"),
o.ContainSubstring("fs.aio-max-nr=>1048576"),
o.ContainSubstring("net.netfilter.nf_conntrack_max=1048576"),
o.ContainSubstring("net.ipv4.conf.all.arp_announce=2"),
o.ContainSubstring("net.ipv4.neigh.default.gc_thresh1=8192"),
o.ContainSubstring("net.ipv4.neigh.default.gc_thresh2=32768"),
o.ContainSubstring("net.ipv4.neigh.default.gc_thresh3=65536"),
o.ContainSubstring("net.ipv6.neigh.default.gc_thresh1=8192"),
o.ContainSubstring("net.ipv6.neigh.default.gc_thresh2=32768"),
o.ContainSubstring("net.ipv6.neigh.default.gc_thresh3=65536"),
o.ContainSubstring("vm.max_map_count=262144"),
o.ContainSubstring("/sys/module/nvme_core/parameters/io_timeout=4294967295"),
o.ContainSubstring(`cgroup_ps_blacklist=/kubepods\.slice/`),
o.ContainSubstring("runtime=0")))
}
exutil.By("Check content of tuned file /usr/lib/tuned/openshift-control-plane/tuned.conf to match default NTO settings")
openshiftControlPlaneTunedConf, err := exutil.RemoteShPod(oc, ntoNamespace, tunedPodName, "cat", "/usr/lib/tuned/openshift-control-plane/tuned.conf")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(openshiftControlPlaneTunedConf).NotTo(o.BeEmpty())
o.Expect(openshiftControlPlaneTunedConf).To(o.ContainSubstring("include=openshift"))
if strings.Contains(kernelVersion, "el8") || strings.Contains(kernelVersion, "el7") {
o.Expect(openshiftControlPlaneTunedConf).To(o.And(
o.ContainSubstring("sched_wakeup_granularity_ns=4000000"),
o.ContainSubstring("sched_migration_cost_ns=5000000")))
} else {
o.Expect(openshiftControlPlaneTunedConf).NotTo(o.And(
o.ContainSubstring("sched_wakeup_granularity_ns=4000000"),
o.ContainSubstring("sched_migration_cost_ns=5000000")))
}
exutil.By("Check content of tuned file /usr/lib/tuned/openshift-node/tuned.conf to match default NTO settings")
openshiftNodeTunedConf, err := exutil.RemoteShPod(oc, ntoNamespace, tunedPodName, "cat", "/usr/lib/tuned/openshift-node/tuned.conf")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(openshiftNodeTunedConf).To(o.And(
o.ContainSubstring("include=openshift"),
o.ContainSubstring("net.ipv4.tcp_fastopen=3"),
o.ContainSubstring("fs.inotify.max_user_watches=65536"),
o.ContainSubstring("fs.inotify.max_user_instances=8192")))
})
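The long ContainSubstring chains above can be hard to scan; an equivalent, table-driven way to express the same assertions is sketched below, assuming the gomega alias o already imported by this file (the expected keys are copied from the openshift-node check above, purely to illustrate the pattern):

// Table-driven form of the openshift-node tuned.conf assertions above (sketch).
var expectedOpenshiftNodeKeys = []string{
	"include=openshift",
	"net.ipv4.tcp_fastopen=3",
	"fs.inotify.max_user_watches=65536",
	"fs.inotify.max_user_instances=8192",
}

func assertContainsAll(content string, keys []string) {
	for _, key := range keys {
		o.Expect(content).To(o.ContainSubstring(key))
	}
}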
| |||||
test case
|
openshift/openshift-tests-private
|
fa246868-150e-411f-b457-974211284e12
|
ROSA-OSD_CCS-NonHyperShiftHOST-Author:liqcui-Medium-33238-Test NTO support for operatorapi Removed state [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("ROSA-OSD_CCS-NonHyperShiftHOST-Author:liqcui-Medium-33238-Test NTO support for operatorapi Removed state [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
exutil.By("Remove custom profile (if not already removed) and patch default tuned back to Managed")
//Cleanup tuned and change back to managed state
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", ntoNamespace, "tuned", "tuning-pidmax", "--ignore-not-found").Execute()
defer patchTunedState(oc, ntoNamespace, "default", "Managed")
ntoRes := ntoResource{
name: "tuning-pidmax",
namespace: ntoNamespace,
template: customTunedProfile,
sysctlparm: "kernel.pid_max",
sysctlvalue: "182218",
}
oc.SetupProject()
ntoTestNS := oc.Namespace()
//Clean up the custom profile tuning-pidmax and unlabel the nginx pod
defer ntoRes.delete(oc)
//First choice is to use the [tests] image, which is mirrored by default in disconnected clusters
//if the [tests] image is not available in some environments, we can use hello-openshift as the image
//usually the tests imagestream ships in all OCP releases and the image is mirrored in disconnected clusters by default
// AppImageName := exutil.GetImagestreamImageName(oc, "tests")
// if len(AppImageName) == 0 {
AppImageName := "quay.io/openshifttest/nginx-alpine@sha256:04f316442d48ba60e3ea0b5a67eb89b0b667abf1c198a3d0056ca748736336a0"
// }
//Create a nginx web application pod
exutil.By("Create a nginx web pod in nto temp namespace")
exutil.ApplyNsResourceFromTemplate(oc, ntoTestNS, "--ignore-unknown-parameters=true", "-f", podNginxFile, "-p", "IMAGENAME="+AppImageName)
//Check if nginx pod is ready
exutil.AssertPodToBeReady(oc, "nginx", ntoTestNS)
//Get the node name in the same node as nginx app
tunedNodeName, err := exutil.GetPodNodeName(oc, ntoTestNS, "nginx")
o.Expect(err).NotTo(o.HaveOccurred())
//Get the tuned pod name in the same node as nginx app
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
e2e.Logf("the tuned name on node %v is %v", tunedNodeName, tunedPodName)
//Label pod nginx with tuned.openshift.io/elasticsearch=
exutil.By("Label nginx pod as tuned.openshift.io/elasticsearch=")
err = exutil.LabelPod(oc, ntoTestNS, "nginx", "tuned.openshift.io/elasticsearch=")
o.Expect(err).NotTo(o.HaveOccurred())
//Apply new profile that match label tuned.openshift.io/elasticsearch=
exutil.By("Apply new profile from CR")
ntoRes.createTunedProfileIfNotExist(oc)
//Verify if the new profile is applied
ntoRes.assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "tuning-pidmax", "True")
profileCheck, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profileCheck).To(o.Equal("tuning-pidmax"))
exutil.By("Check logs, profile changes SHOULD be applied since tuned is MANAGED")
logsCheck, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", ntoNamespace, "--tail=9", tunedPodName).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(logsCheck).To(o.ContainSubstring("tuning-pidmax"))
exutil.By("Compare if the value user.max_ipc_namespaces in on node with labeled pod, should be 182218")
compareSysctlValueOnSepcifiedNodeByName(oc, tunedNodeName, "kernel.pid_max", "", "182218")
exutil.By("Patch default tuned to 'Removed'")
err = patchTunedState(oc, ntoNamespace, "default", "Removed")
o.Expect(err).NotTo(o.HaveOccurred())
state, err := getTunedState(oc, ntoNamespace, "default")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(state).To(o.Equal("Removed"))
exutil.By("Check logs, profiles, and nodes (profile changes SHOULD NOT be applied since tuned is REMOVED)")
exutil.By("Check pod status, all tuned pod should be terminated since tuned is REMOVED")
exutil.WaitForNoPodsAvailableByKind(oc, "daemonset", "tuned", ntoNamespace)
podCheck, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "pods").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(podCheck).NotTo(o.ContainSubstring("tuned"))
exutil.By("Check profile status, all node profile should be removed since tuned is REMOVED)")
profileCheck, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profileCheck).To(o.ContainSubstring("No resources"))
exutil.By("Change tuned state back to managed ...")
err = patchTunedState(oc, ntoNamespace, "default", "Managed")
o.Expect(err).NotTo(o.HaveOccurred())
state, err = getTunedState(oc, ntoNamespace, "default")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(state).To(o.Equal("Managed"))
exutil.By("Get the tuned node and pod names")
//Get the node name in the same node as nginx app
tunedNodeName, err = exutil.GetPodNodeName(oc, ntoTestNS, "nginx")
o.Expect(err).NotTo(o.HaveOccurred())
//Get the tuned pod name in the same node as nginx app
tunedPodName = getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
exutil.By("Check logs, profiles, and nodes (profile changes SHOULD be applied since tuned is MANAGED)")
//Verify if the new profile is applied
ntoRes.assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "tuning-pidmax", "True")
profileCheck, err = getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profileCheck).To(o.Equal("tuning-pidmax"))
exutil.By("Check logs, profile changes SHOULD be applied since tuned is MANAGED)")
logsCheck, err = oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", ntoNamespace, "--tail=9", tunedPodName).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(logsCheck).To(o.ContainSubstring("tuning-pidmax"))
exutil.By("Compare if the value user.max_ipc_namespaces in on node with labeled pod, should be 182218")
compareSysctlValueOnSepcifiedNodeByName(oc, tunedNodeName, "kernel.pid_max", "", "182218")
})
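// A minimal standalone sketch (not part of this suite) of the Managed/Removed toggle
// exercised above, shelling out to `oc` with os/exec instead of the exutil helpers.
// The NTO namespace, the "default" Tuned name and the spec.managementState field are
// assumptions taken from the test steps, not a verified API contract.
package main
import (
"fmt"
"os/exec"
)
// patchTunedState patches the (assumed) managementState field of a Tuned CR.
func patchTunedState(namespace, name, state string) error {
patch := fmt.Sprintf(`{"spec":{"managementState":%q}}`, state)
out, err := exec.Command("oc", "patch", "tuned", name, "-n", namespace,
"--type", "merge", "-p", patch).CombinedOutput()
if err != nil {
return fmt.Errorf("oc patch failed: %v: %s", err, out)
}
return nil
}
func main() {
ns := "openshift-cluster-node-tuning-operator" // assumed NTO namespace
// Toggle the default Tuned CR to Removed and back to Managed, as the test does.
for _, state := range []string{"Removed", "Managed"} {
if err := patchTunedState(ns, "default", state); err != nil {
fmt.Println(err)
}
}
}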
| ||||||
test case
|
openshift/openshift-tests-private
|
710acd9f-0eb6-49b5-a404-dcdde9862c31
|
Longduration-NonPreRelease-Author:liqcui-Medium-30589-NTO Use MachineConfigs to lay down files needed for tuned [Disruptive] [Slow]
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("Longduration-NonPreRelease-Author:liqcui-Medium-30589-NTO Use MachineConfigs to lay down files needed for tuned [Disruptive] [Slow]", func() {
// test requires NTO to be installed
isSNO := exutil.IsSNOCluster(oc)
if !isNTO || isSNO {
g.Skip("NTO is not installed or is Single Node Cluster- skipping test ...")
}
//Prefer to choose a worker node from a machineset
if !isSNO {
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
} else {
tunedNodeName, err = exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
}
//Re-delete mcp, mc, performance profile and unlabel the node, in case the test case was interrupted before the cleanup steps
defer exutil.DeleteMCAndMCPByName(oc, "50-nto-worker-rt", "worker-rt", 300)
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-rt-").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "openshift-realtime", "-n", ntoNamespace, "--ignore-not-found").Execute()
exutil.By("Create machine config pool")
exutil.ApplyClusterResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", ntoMCPFile, "-p", "MCP_NAME=worker-rt")
exutil.By("Label the node with node-role.kubernetes.io/worker-rt=")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-rt=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create openshift-realtime profile")
//ocpArch, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.status.nodeInfo.architecture}").Output()
// o.Expect(err).NotTo(o.HaveOccurred())
// if (iaasPlatform == "aws" || iaasPlatform == "gcp") && ocpArch == "amd64" {
exutil.ApplyNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", ntoRealtimeFile, "-p", "INCLUDE=openshift-node,realtime")
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Assert if machine config pool applied for worker nodes")
exutil.AssertIfMCPChangesAppliedByName(oc, "worker", 300)
exutil.AssertIfMCPChangesAppliedByName(oc, "worker-rt", 300)
exutil.By("Assert if openshift-realtime profile was applied ...")
//Verify if the new profile is applied
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "openshift-realtime")
profileCheck, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profileCheck).To(o.Equal("openshift-realtime"))
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Assert if isolcpus was applied in machineconfig...")
AssertTunedAppliedMC(oc, "nto-worker-rt", "isolcpus=")
exutil.By("Assert if isolcpus was applied in labled node...")
isMatch := AssertTunedAppliedToNode(oc, tunedNodeName, "isolcpus=")
o.Expect(isMatch).To(o.Equal(true))
exutil.By("Delete openshift-realtime tuned in labled node...")
oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "openshift-realtime", "-n", ntoNamespace, "--ignore-not-found").Execute()
exutil.By("Check Nodes for expected changes")
assertIfNodeSchedulingDisabled(oc)
exutil.By("Assert if machine config pool applied for worker nodes")
exutil.AssertIfMCPChangesAppliedByName(oc, "worker-rt", 300)
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Assert if isolcpus was applied in labled node...")
isMatch = AssertTunedAppliedToNode(oc, tunedNodeName, "isolcpus=")
o.Expect(isMatch).To(o.Equal(false))
//The custom mc and mcp must be deleted in the correct sequence: unlabel first so the labeled node returns to the worker mcp, then delete the mc and mcp
//otherwise the mcp will stay in a degraded state and affect other test cases that use the mcp
exutil.By("Delete custom MC and MCP in the right order...")
oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-rt-").Execute()
exutil.DeleteMCAndMCPByName(oc, "50-nto-worker-rt", "worker-rt", 300)
})
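// A hedged sketch (an assumption, not the actual ntoMCPFile template) of what the
// worker-rt MachineConfigPool created above typically looks like: it selects
// MachineConfigs with role worker or worker-rt and targets nodes labeled
// node-role.kubernetes.io/worker-rt="".
package main
import "fmt"
const workerRtMCP = `apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfigPool
metadata:
name: worker-rt
labels:
worker-rt: ""
spec:
machineConfigSelector:
matchExpressions:
- key: machineconfiguration.openshift.io/role
operator: In
values: [worker, worker-rt]
nodeSelector:
matchLabels:
node-role.kubernetes.io/worker-rt: ""
`
func main() {
// Print the manifest; in practice it would be applied with `oc apply -f -`.
fmt.Print(workerRtMCP)
}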
| ||||||
test case
|
openshift/openshift-tests-private
|
ee0acd0b-60b6-4d85-b9f5-2fbabf58211c
|
ROSA-OSD_CCS-NonHyperShiftHOST-Author:liqcui-Medium-29804-Tuned profile is updated after incorrect tuned CR is fixed [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("ROSA-OSD_CCS-NonHyperShiftHOST-Author:liqcui-Medium-29804-Tuned profile is updated after incorrect tuned CR is fixed [Disruptive]", func() {
// test requires NTO to be installed
isSNO := exutil.IsSNOCluster(oc)
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
is3Master := exutil.Is3MasterNoDedicatedWorkerNode(oc)
var (
tunedNodeName string
err error
)
//Choose a worker node as the labeled node
//Also support compact clusters with 3 master/worker nodes and no dedicated worker nodes
if !is3Master && !isSNO {
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
} else {
tunedNodeName, err = exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
}
e2e.Logf("tunedNodeName is:\n%v", tunedNodeName)
//Get the tuned pod name on the same node as the labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "tuned-").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "ips", "-n", ntoNamespace, "--ignore-not-found").Execute()
exutil.By("Label the node with tuned=ips")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "tuned=ips", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create ips-host profile, new tuned should automatically handle duplicate sysctl settings")
//Define duplicated parameter and value
exutil.ApplyNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", IPSFile, "-p", "SYSCTLPARM1=kernel.pid_max", "SYSCTLVALUE1=1048575", "SYSCTLPARM2=kernel.pid_max", "SYSCTLVALUE2=1048575")
exutil.By("Assert recommended profile (ips-host) matches current configuration in tuned pod log")
assertNTOPodLogsLastLines(oc, ntoNamespace, tunedPodName, "15", 180, `recommended profile \(ips-host\) matches current configuration|\(ips-host\) match|'ips-host' applied`)
exutil.By("Check if new custom profile applied to label node")
o.Expect(assertNTOCustomProfileStatus(oc, ntoNamespace, tunedNodeName, "ips-host", "True", "False")).To(o.Equal(true))
//Only used for debug info
exutil.By("Check current profile for each node")
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
e2e.Logf("Current profile for each node: \n%v", output)
//New tuned can automatically de-duplicate value of sysctl, no duplicate error anymore
exutil.By("Assert if the duplicate value of sysctl kernel.pid_max take effective on target node, expected value should be 1048575")
compareSpecifiedValueByNameOnLabelNode(oc, tunedNodeName, "kernel.pid_max", "1048575")
exutil.By("Get default value of fs.mount-max on label node")
defaultMaxMapCount := getValueOfSysctlByName(oc, ntoNamespace, tunedNodeName, "fs.mount-max")
o.Expect(defaultMaxMapCount).NotTo(o.BeEmpty())
e2e.Logf("The default value of sysctl fs.mount-max is %v", defaultMaxMapCount)
//setting an invalid value for ips-host profile
exutil.By("Update ips-host profile with invalid value of fs.mount-max = -1")
exutil.ApplyNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", IPSFile, "-p", "SYSCTLPARM1=fs.mount-max", "SYSCTLVALUE1=-1", "SYSCTLPARM2=kernel.pid_max", "SYSCTLVALUE2=1048575")
exutil.By("Assert static tuning from profile 'ips-host' applied in tuned pod log")
assertNTOPodLogsLastLines(oc, ntoNamespace, tunedPodName, "20", 180, `'ips-host' applied|recommended profile \(ips-host\) matches current configuration`)
exutil.By("Check if new custom profile applied to label node")
o.Expect(assertNTOCustomProfileStatus(oc, ntoNamespace, tunedNodeName, "ips-host", "True", "True")).To(o.Equal(true))
exutil.By("Check current profile for each node")
output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
e2e.Logf("Current profile for each node: \n%v", output)
//The invalid value won't impact default value of fs.mount-max
exutil.By("Assert if the value of sysctl fs.mount-max still use default value")
compareSpecifiedValueByNameOnLabelNode(oc, tunedNodeName, "fs.mount-max", defaultMaxMapCount)
//Set a new value of fs.mount-max for the ips-host profile
exutil.By("Update ips-host profile with new value of fs.mount-max = 868686")
exutil.ApplyNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", IPSFile, "-p", "SYSCTLPARM1=fs.mount-max", "SYSCTLVALUE1=868686", "SYSCTLPARM2=kernel.pid_max", "SYSCTLVALUE2=1048575")
exutil.By("Assert recommended profile (ips-host) matches current configuration in tuned pod log")
assertNTOPodLogsLastLines(oc, ntoNamespace, tunedPodName, "15", 180, `recommended profile \(ips-host\) matches current configuration|\(ips-host\) match|'ips-host' applied`)
exutil.By("Check if new custom profile applied to label node")
o.Expect(assertNTOCustomProfileStatus(oc, ntoNamespace, tunedNodeName, "ips-host", "True", "False")).To(o.Equal(true))
exutil.By("Check current profile for each node")
output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
e2e.Logf("Current profile for each node: \n%v", output)
//The new value of fs.mount-max should take effect
exutil.By("Assert if the new value of sysctl fs.mount-max takes effect, expected value is 868686")
compareSpecifiedValueByNameOnLabelNode(oc, tunedNodeName, "fs.mount-max", "868686")
})
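// A standalone sketch of how a sysctl value can be read from a node, conceptually what
// the comparison helpers above do: `oc debug node/<name>` plus chroot into the host.
// The node name is a placeholder; the flags mirror common `oc debug` usage and are not
// taken from the private helpers themselves.
package main
import (
"fmt"
"os/exec"
"strings"
)
// sysctlOnNode returns the value of a sysctl key on the given node.
func sysctlOnNode(node, key string) (string, error) {
out, err := exec.Command("oc", "debug", "node/"+node, "--quiet=true", "--",
"chroot", "/host", "sysctl", "-n", key).Output()
if err != nil {
return "", fmt.Errorf("oc debug failed: %w", err)
}
return strings.TrimSpace(string(out)), nil
}
func main() {
val, err := sysctlOnNode("worker-0.example.com", "kernel.pid_max") // placeholder node
if err != nil {
fmt.Println(err)
return
}
fmt.Println("kernel.pid_max =", val)
}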
| ||||||
test case
|
openshift/openshift-tests-private
|
23157db8-d8b7-4453-bf28-7302d51352f5
|
Longduration-NonPreRelease-Author:liqcui-Medium-39123-NTO Operator will update tuned after changing included profile [Disruptive] [Slow]
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("Longduration-NonPreRelease-Author:liqcui-Medium-39123-NTO Operator will update tuned after changing included profile [Disruptive] [Slow]", func() {
// test requires NTO to be installed
isSNO := exutil.IsSNOCluster(oc)
if !isNTO || isSNO {
g.Skip("NTO is not installed or is Single Node Cluster- skipping test ...")
}
if ManualPickup {
g.Skip("This is the test case that execute mannually in shared cluster ...")
}
skipPAODeploy := skipDeployPAO(oc)
isPAOInstalled = exutil.IsPAOInstalled(oc)
if skipPAODeploy || isPAOInstalled {
e2e.Logf("PAO has been installed and continue to execute test case")
} else {
isPAOInOperatorHub := exutil.IsPAOInOperatorHub(oc)
if !isPAOInOperatorHub {
g.Skip("PAO is not in OperatorHub - skipping test ...")
}
exutil.InstallPAO(oc, paoNamespace)
}
//Re-delete mcp, mc, performance profile and unlabel the node, in case the test case was interrupted before the cleanup steps
defer exutil.DeleteMCAndMCPByName(oc, "50-nto-worker-cnf", "worker-cnf", 300)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "performance-patch", "-n", ntoNamespace, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("PerformanceProfile", "performance", "--ignore-not-found").Execute()
//Prefer to choose a worker node from a machineset
if !isSNO {
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
} else {
tunedNodeName, err = exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
}
//Get the tuned pod name on the same node as the labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-cnf-").Execute()
exutil.By("Label the node with node-role.kubernetes.io/worker-cnf=")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-cnf=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// currently test is only supported on AWS, GCP, and Azure
// if iaasPlatform == "aws" || iaasPlatform == "gcp" {
ocpArch, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.status.nodeInfo.architecture}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if (iaasPlatform == "aws" || iaasPlatform == "gcp") && ocpArch == "amd64" {
//Only GCP and AWS support realtime-kernel
exutil.By("Apply performance profile")
exutil.ApplyClusterResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", paoPerformanceFile, "-p", "ISENABLED=true")
} else {
exutil.By("Apply performance profile")
exutil.ApplyClusterResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", paoPerformanceFile, "-p", "ISENABLED=false")
}
exutil.By("Apply worker-cnf machineconfigpool")
exutil.ApplyOperatorResourceByYaml(oc, paoNamespace, paoWorkerCnfMCPFile)
exutil.By("Assert if the MCP worker-cnf has been successfully applied ...")
exutil.AssertIfMCPChangesAppliedByName(oc, "worker-cnf", 900)
exutil.By("Check if new NTO profile openshift-node-performance-performance was applied")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "openshift-node-performance-performance")
exutil.By("Check if profile openshift-node-performance-performance applied on nodes")
nodeProfileName, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeProfileName).To(o.ContainSubstring("openshift-node-performance-performance"))
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check if tuned pod logs contains openshift-node-performance-performance on labeled nodes")
assertNTOPodLogsLastLines(oc, ntoNamespace, tunedPodName, "20", 60, "openshift-node-performance-performance")
exutil.By("Check if the linux kernel parameter as vm.stat_interval = 10")
compareSpecifiedValueByNameOnLabelNode(oc, tunedNodeName, "vm.stat_interval", "10")
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Apply performance-patch profile")
exutil.ApplyOperatorResourceByYaml(oc, ntoNamespace, paoPerformancePatchFile)
exutil.By("Assert if the MCP worker-cnf is ready after node rebooted ...")
exutil.AssertIfMCPChangesAppliedByName(oc, "worker-cnf", 750)
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check if profile what's active profile applied on nodes")
nodeProfileName, err = getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeProfileName).To(o.ContainSubstring("openshift-node-performance-performance"))
exutil.By("Check if tuned pod logs contains Cannot find profile 'openshift-node-performance-example-performanceprofile' on labeled nodes")
assertNTOPodLogsLastLines(oc, ntoNamespace, tunedPodName, "30", 60, "Cannot find profile")
exutil.By("Check if the linux kernel parameter as vm.stat_interval = 1")
compareSpecifiedValueByNameOnLabelNode(oc, tunedNodeName, "vm.stat_interval", "1")
exutil.By("Patch include to include=openshift-node-performance-performance")
err = patchTunedProfile(oc, ntoNamespace, "performance-patch", paoPerformanceFixpatchFile)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Assert if the MCP worker-cnf is ready after node rebooted ...")
exutil.AssertIfMCPChangesAppliedByName(oc, "worker-cnf", 600)
exutil.By("Check if new NTO profile performance-patch was applied")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "performance-patch")
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check if contains static tuning from profile 'performance-patch' applied in tuned pod logs on labeled nodes")
assertNTOPodLogsLastLines(oc, ntoNamespace, tunedPodName, "30", 60, `static tuning from profile 'performance-patch' applied|recommended profile \(performance-patch\) matches current configuration`)
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check if the linux kernel parameter as vm.stat_interval = 10")
compareSpecifiedValueByNameOnLabelNode(oc, tunedNodeName, "vm.stat_interval", "10")
//The custom mc and mcp must be deleted in the correct sequence: unlabel first so the labeled node returns to the worker mcp, then delete the mc and mcp
//otherwise the mcp will stay in a degraded state and affect other test cases that use the mcp
exutil.By("Delete custom MC and MCP in the right order...")
oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-cnf-").Execute()
exutil.DeleteMCAndMCPByName(oc, "50-nto-worker-cnf", "worker-cnf", 480)
})
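// A hedged sketch (not the paoPerformancePatchFile content) of the general shape of a
// Tuned CR like the performance-patch profile used above: spec.profile carries the
// TuneD ini data whose [main] include= line decides which parent profiles are layered,
// and spec.recommend binds it to the worker-cnf pool via machineConfigLabels. Values
// such as summary, priority and the sysctl are illustrative assumptions.
package main
import "fmt"
const performancePatch = `apiVersion: tuned.openshift.io/v1
kind: Tuned
metadata:
name: performance-patch
namespace: openshift-cluster-node-tuning-operator
spec:
profile:
- name: performance-patch
data: |
[main]
summary=Patch applied on top of the performance profile
include=openshift-node-performance-performance
[sysctl]
vm.stat_interval=10
recommend:
- machineConfigLabels:
machineconfiguration.openshift.io/role: worker-cnf
priority: 19
profile: performance-patch
`
func main() { fmt.Print(performancePatch) }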
| ||||||
test case
|
openshift/openshift-tests-private
|
8f249e18-5f88-46a7-ab26-ad53b8879123
|
Longduration-NonPreRelease-Author:liqcui-Medium-45686-NTO Creating tuned profile with references to not yet existing Performance Profile configuration.[Disruptive] [Slow]
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("Longduration-NonPreRelease-Author:liqcui-Medium-45686-NTO Creating tuned profile with references to not yet existing Performance Profile configuration.[Disruptive] [Slow]", func() {
// test requires NTO to be installed
isSNO := exutil.IsSNOCluster(oc)
if !isNTO || isSNO {
g.Skip("NTO is not installed or is Single Node Cluster- skipping test ...")
}
if ManualPickup {
g.Skip("This is the test case that execute mannually in shared cluster ...")
}
skipPAODeploy := skipDeployPAO(oc)
isPAOInstalled = exutil.IsPAOInstalled(oc)
if skipPAODeploy || isPAOInstalled {
e2e.Logf("PAO has been installed and continue to execute test case")
} else {
isPAOInOperatorHub := exutil.IsPAOInOperatorHub(oc)
if !isPAOInOperatorHub {
g.Skip("PAO is not in OperatorHub - skipping test ...")
}
exutil.InstallPAO(oc, paoNamespace)
}
defer exutil.DeleteMCAndMCPByName(oc, "50-nto-worker-optimize", "worker-optimize", 360)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "include-performance-profile", "-n", ntoNamespace, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("PerformanceProfile", "optimize", "--ignore-not-found").Execute()
//Use the last worker node as labeled node
tunedNodeName, err := exutil.GetLastLinuxWorkerNode(oc)
o.Expect(err).NotTo(o.HaveOccurred())
//Get the tuned pod name in the labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
//Re-delete mcp, mc, performance profile and unlabel the node, in case the test case was interrupted before the cleanup steps
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-optimize-").Execute()
exutil.By("Label the node with node-role.kubernetes.io/worker-optimize=")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-optimize=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Apply worker-optimize machineconfigpool")
exutil.ApplyOperatorResourceByYaml(oc, paoNamespace, paoWorkerOptimizeMCPFile)
exutil.By("Assert if the MCP has been successfully applied ...")
exutil.AssertIfMCPChangesAppliedByName(oc, "worker-optimize", 600)
isSNO = exutil.IsSNOCluster(oc)
if isSNO {
exutil.By("Apply include-performance-profile tuned profile")
exutil.ApplyNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", paoIncludePerformanceProfile, "-p", "ROLENAME=master")
exutil.By("Assert if the mcp is ready after server has been successfully rebooted...")
exutil.AssertIfMCPChangesAppliedByName(oc, "master", 600)
} else {
exutil.By("Apply include-performance-profile tuned profile")
exutil.ApplyNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", paoIncludePerformanceProfile, "-p", "ROLENAME=worker-optimize")
exutil.By("Assert if the mcp is ready after server has been successfully rebooted...")
exutil.AssertIfMCPChangesAppliedByName(oc, "worker-optimize", 600)
}
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check if profile what's active profile applied on nodes")
nodeProfileName, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
if isSNO {
o.Expect(nodeProfileName).To(o.ContainSubstring("openshift-control-plane"))
} else {
o.Expect(nodeProfileName).To(o.ContainSubstring("openshift-node"))
}
exutil.By("Check if tuned pod logs contains Cannot find profile 'openshift-node-performance-optimize' on labeled nodes")
assertNTOPodLogsLastLines(oc, ntoNamespace, tunedPodName, "10", 60, "Cannot find profile 'openshift-node-performance-optimize'")
if isSNO {
exutil.By("Apply performance optimize profile")
exutil.ApplyClusterResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", paoPerformanceOptimizeFile, "-p", "ROLENAME=master")
exutil.By("Assert if the mcp is ready after server has been successfully rebooted...")
exutil.AssertIfMCPChangesAppliedByName(oc, "master", 600)
} else {
exutil.By("Apply performance optimize profile")
exutil.ApplyClusterResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", paoPerformanceOptimizeFile, "-p", "ROLENAME=worker-optimize")
exutil.By("Assert if the mcp is ready after server has been successfully rebooted...")
exutil.AssertIfMCPChangesAppliedByName(oc, "worker-optimize", 600)
}
exutil.By("Check performance profile tuned profile should be automatically created")
tunedNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "tuned").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNames).To(o.ContainSubstring("openshift-node-performance-optimize"))
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check if new NTO profile performance-patch was applied")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "include-performance-profile")
exutil.By("Check if profile what's active profile applied on nodes")
nodeProfileName, err = getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeProfileName).To(o.ContainSubstring("include-performance-profile"))
exutil.By("Check if contains static tuning from profile 'include-performance-profile' applied in tuned pod logs on labeled nodes")
assertNTOPodLogsLastLines(oc, ntoNamespace, tunedPodName, "20", 60, `static tuning from profile 'include-performance-profile' applied|recommended profile \(include-performance-profile\) matches current configuration`)
//The custom mc and mcp must be deleted in the correct sequence: unlabel first so the labeled node returns to the worker mcp, then delete the mc and mcp
//otherwise the mcp will stay in a degraded state and affect other test cases that use the mcp
exutil.By("Delete custom MC and MCP in the right order...")
oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-optimize-").Execute()
exutil.DeleteMCAndMCPByName(oc, "50-nto-worker-optimize", "worker-optimize", 480)
})
| ||||||
test case
|
openshift/openshift-tests-private
|
ae019593-5283-441e-83f7-1557a2c0c12b
|
NonHyperShiftHOST-Author:liqcui-Medium-36152-NTO Get metrics and alerts
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("NonHyperShiftHOST-Author:liqcui-Medium-36152-NTO Get metrics and alerts", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
//get metric information that require ssl auth
sslKey := "/etc/prometheus/secrets/metrics-client-certs/tls.key"
sslCrt := "/etc/prometheus/secrets/metrics-client-certs/tls.crt"
//Get NTO metrics data
exutil.By("Get NTO metrics informaton without ssl, should be denied access, throw error...")
metricsOutput, metricsError := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", "openshift-monitoring", "sts/prometheus-k8s", "-c", "prometheus", "--", "curl", "-k", "https://node-tuning-operator.openshift-cluster-node-tuning-operator.svc:60000/metrics").Output()
o.Expect(metricsError).Should(o.HaveOccurred())
o.Expect(metricsOutput).NotTo(o.BeEmpty())
o.Expect(metricsOutput).To(o.Or(
o.ContainSubstring("bad certificate"),
o.ContainSubstring("errno = 104"),
o.ContainSubstring("certificate required"),
o.ContainSubstring("error:1409445C"),
o.ContainSubstring("exit code 56"),
o.ContainSubstring("errno = 32")))
exutil.By("Get NTO metrics informaton with ssl key and crt, should be access, get the metric information...")
metricsOutput, metricsError = oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", "openshift-monitoring", "sts/prometheus-k8s", "-c", "prometheus", "--", "curl", "-k", "--key", sslKey, "--cert", sslCrt, "https://node-tuning-operator.openshift-cluster-node-tuning-operator.svc:60000/metrics").Output()
o.Expect(metricsOutput).NotTo(o.BeEmpty())
o.Expect(metricsError).NotTo(o.HaveOccurred())
e2e.Logf("The metrics information of NTO as below: \n%v", metricsOutput)
//Assert the key metrics
exutil.By("Check if all metrics exist as expected...")
o.Expect(metricsOutput).To(o.And(
o.ContainSubstring("nto_build_info"),
o.ContainSubstring("nto_pod_labels_used_info"),
o.ContainSubstring("nto_degraded_info"),
o.ContainSubstring("nto_profile_calculated_total")))
})
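// A standalone sketch of the mTLS metrics scrape performed with curl above, using Go's
// standard library instead. The certificate paths and the service URL are copied from
// the test; InsecureSkipVerify mirrors curl's -k and is for illustration only.
package main
import (
"crypto/tls"
"fmt"
"io"
"net/http"
)
func main() {
cert, err := tls.LoadX509KeyPair(
"/etc/prometheus/secrets/metrics-client-certs/tls.crt",
"/etc/prometheus/secrets/metrics-client-certs/tls.key",
)
if err != nil {
fmt.Println("loading client cert:", err)
return
}
client := &http.Client{Transport: &http.Transport{
TLSClientConfig: &tls.Config{
Certificates: []tls.Certificate{cert},
InsecureSkipVerify: true, // equivalent of curl -k, do not use in production
},
}}
resp, err := client.Get("https://node-tuning-operator.openshift-cluster-node-tuning-operator.svc:60000/metrics")
if err != nil {
fmt.Println("request failed:", err)
return
}
defer resp.Body.Close()
body, _ := io.ReadAll(resp.Body)
fmt.Printf("status=%s bytes=%d\n", resp.Status, len(body))
}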
| ||||||
test case
|
openshift/openshift-tests-private
|
25f09512-f7fc-4a38-992f-5ebafd1af2a4
|
NonPreRelease-Longduration-Author:liqcui-Medium-49265-NTO support automatically rotate ssl certificate. [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("NonPreRelease-Longduration-Author:liqcui-Medium-49265-NTO support automatically rotate ssl certificate. [Disruptive]", func() {
// test requires NTO to be installed
is3CPNoWorker := exutil.Is3MasterNoDedicatedWorkerNode(oc)
isSNO := exutil.IsSNOCluster(oc)
if !isNTO || is3CPNoWorker || isSNO {
g.Skip("NTO is not installed or No need to test on compact cluster - skipping test ...")
}
//Use the last worker node as labeled node
tunedNodeName, err = exutil.GetLastLinuxWorkerNode(oc)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The tuned node name is: \n%v", tunedNodeName)
//Get NTO operator pod name
ntoOperatorPod, err := getNTOPodName(oc, ntoNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The tuned operator pod name is: \n%v", ntoOperatorPod)
metricEndpoint := getServiceENDPoint(oc, ntoNamespace)
exutil.By("Get information about the certificate the metrics server in NTO")
openSSLOutputBefore, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "/bin/bash", "-c", "/bin/openssl s_client -connect "+metricEndpoint+" 2>/dev/null </dev/null").Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Get information about the creation and expiration date of the certificate")
openSSLExpireDateBefore, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "/bin/bash", "-c", "/bin/openssl s_client -connect "+metricEndpoint+" 2>/dev/null </dev/null | /bin/openssl x509 -noout -dates").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The openSSL Expired Date information of NTO openSSL before rotate as below: \n%v", openSSLExpireDateBefore)
encodeBase64OpenSSLOutputBefore := exutil.StringToBASE64(openSSLOutputBefore)
encodeBase64OpenSSLExpireDateBefore := exutil.StringToBASE64(openSSLExpireDateBefore)
//To improve the success rate, execute oc delete secret/node-tuning-operator-tls instead of deleting oc -n openshift-service-ca secret/signing-key
//The latter, "oc -n openshift-service-ca secret/signing-key", takes more time to complete and needs to be re-executed manually once it fails.
exutil.By("Delete secret/node-tuning-operator-tls to automate to create a new one certificate")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", ntoNamespace, "secret/node-tuning-operator-tls").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Assert NTO logs to match key words restarting metrics server to rotate certificates")
assertNTOPodLogsLastLines(oc, ntoNamespace, ntoOperatorPod, "4", 240, "restarting metrics server to rotate certificates")
exutil.By("Assert if NTO rotate certificates ...")
AssertNTOCertificateRotate(oc, ntoNamespace, tunedNodeName, encodeBase64OpenSSLOutputBefore, encodeBase64OpenSSLExpireDateBefore)
exutil.By("The certificate extracted from the openssl command should match the first certificate from the tls.crt file in the secret")
compareCertificateBetweenOpenSSLandTLSSecret(oc, ntoNamespace, tunedNodeName)
})
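// A standalone sketch of the openssl s_client check above: dial the NTO metrics
// endpoint over TLS and print the serving certificate's validity window. The endpoint
// is the same service address used in the test; verification is skipped here only
// because the serving cert is signed by the in-cluster service CA.
package main
import (
"crypto/tls"
"fmt"
)
func main() {
addr := "node-tuning-operator.openshift-cluster-node-tuning-operator.svc:60000"
conn, err := tls.Dial("tcp", addr, &tls.Config{InsecureSkipVerify: true})
if err != nil {
fmt.Println("tls dial failed:", err)
return
}
defer conn.Close()
for _, cert := range conn.ConnectionState().PeerCertificates {
fmt.Printf("subject=%s notBefore=%s notAfter=%s\n",
cert.Subject, cert.NotBefore, cert.NotAfter)
}
}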
| ||||||
test case
|
openshift/openshift-tests-private
|
4fcdc358-a83b-4ea2-9d31-b15b4cc44122
|
Longduration-NonPreRelease-Author:liqcui-Medium-49371-NTO will not restart tuned daemon when profile application take too long [Disruptive] [Slow]
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("Longduration-NonPreRelease-Author:liqcui-Medium-49371-NTO will not restart tuned daemon when profile application take too long [Disruptive] [Slow]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
//The tuned restart on timeout was removed due to the bug https://issues.redhat.com/browse/OCPBUGS-30647
//Use the first worker node as labeled node
tunedNodeName, err := exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(err).NotTo(o.HaveOccurred())
//Get the tuned pod name on the same node as the labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "worker-stuck-").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "openshift-profile-stuck", "-n", ntoNamespace, "--ignore-not-found").Execute()
exutil.By("Label the node with worker-stack=")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "worker-stuck=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create openshift-profile-stuck profile")
exutil.ApplyOperatorResourceByYaml(oc, ntoNamespace, workerStackFile)
exutil.By("Check openshift-profile-stuck tuned profile should be automatically created")
tunedNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "tuned").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNames).To(o.ContainSubstring("openshift-profile-stuck"))
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Assert recommended profile (openshift-profile-stuck) matches current configuration in tuned pod log")
assertNTOPodLogsLastLines(oc, ntoNamespace, tunedPodName, "12", 300, `recommended profile \(openshift-profile-stuck\) matches current configuration|'openshift-profile-stuck' applied`)
exutil.By("Check if new NTO profile openshift-profile-stuck was applied")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "openshift-profile-stuck")
exutil.By("Check if profile what's active profile applied on nodes")
nodeProfileName, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeProfileName).To(o.ContainSubstring("openshift-profile-stuck"))
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("The log shouldn't contain [ timeout (120) to apply TuneD profile; restarting TuneD daemon ] in tuned pod log")
ntoPodLogs, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", ntoNamespace, tunedPodName, "--tail=10").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(ntoPodLogs).NotTo(o.ContainSubstring("timeout (120) to apply TuneD profile; restarting TuneD daemon"))
exutil.By("The log shouldn't contain [ error waiting for tuned: signal: terminated ] in tuned pod log")
ntoPodLogs, err = oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", ntoNamespace, tunedPodName, "--tail=10").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(ntoPodLogs).NotTo(o.ContainSubstring("error waiting for tuned: signal: terminated"))
})
| ||||||
test case
|
openshift/openshift-tests-private
|
b03a0dcb-f3bf-467a-8c29-9a6d0bd737e5
|
Longduration-NonPreRelease-Author:liqcui-Medium-49370-NTO add huge pages to boot time via bootloader [Disruptive] [Slow]
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("Longduration-NonPreRelease-Author:liqcui-Medium-49370-NTO add huge pages to boot time via bootloader [Disruptive] [Slow]", func() {
// test requires NTO to be installed
isSNO := exutil.IsSNOCluster(oc)
if !isNTO || isSNO {
g.Skip("NTO is not installed or it's Single Node Cluster- skipping test ...")
}
//Use the last worker node as labeled node
tunedNodeName, err := exutil.GetLastLinuxWorkerNode(oc)
o.Expect(err).NotTo(o.HaveOccurred())
//Get the tuned pod name on the same node as the labeled node
//tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
//Re-delete mcp, mc, performance profile and unlabel the node, in case the test case was interrupted before the cleanup steps
defer exutil.DeleteMCAndMCPByName(oc, "50-nto-worker-hp", "worker-hp", 300)
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-hp-").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "hugepages", "-n", ntoNamespace, "--ignore-not-found").Execute()
exutil.By("Label the node with node-role.kubernetes.io/worker-hp=")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-hp=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create hugepages tuned profile")
exutil.ApplyOperatorResourceByYaml(oc, ntoNamespace, hugepageTunedBoottimeFile)
exutil.By("Check hugepages tuned profile should be automatically created")
tunedNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "tuned").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNames).To(o.ContainSubstring("hugepages"))
exutil.By("Create worker-hp machineconfigpool ...")
exutil.ApplyOperatorResourceByYaml(oc, ntoNamespace, hugepageMCPfile)
exutil.By("Assert if the MCP has been successfully applied ...")
exutil.AssertIfMCPChangesAppliedByName(oc, "worker-hp", 720)
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check if new NTO profile was applied")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "openshift-node-hugepages")
exutil.By("Check if profile openshift-node-hugepages applied on nodes")
nodeProfileName, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeProfileName).To(o.ContainSubstring("openshift-node-hugepages"))
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check value of allocatable.hugepages-2Mi in labled node ")
nodeHugePagesOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.status.allocatable.hugepages-2Mi}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeHugePagesOutput).To(o.ContainSubstring("100M"))
oc.SetupProject()
ntoTestNS := oc.Namespace()
//First choice is to use the [tests] image, which is mirrored by default in disconnected clusters
//If the [tests] image is not available in some environments, we can use hello-openshift as the image
//The tests imagestream usually ships in all OCP releases and its image is mirrored in disconnected clusters by default
AppImageName := exutil.GetImagestreamImageName(oc, "tests")
if len(AppImageName) == 0 {
AppImageName = "quay.io/openshifttest/nginx-alpine@sha256:04f316442d48ba60e3ea0b5a67eb89b0b667abf1c198a3d0056ca748736336a0"
}
//Create a hugepages-app application pod
exutil.By("Create a hugepages-app pod to consume hugepage in nto temp namespace")
exutil.ApplyNsResourceFromTemplate(oc, ntoTestNS, "--ignore-unknown-parameters=true", "-f", hugepage100MPodFile, "-p", "IMAGENAME="+AppImageName)
//Check if hugepages-app is ready
exutil.By("Check if a hugepages-app pod is ready ...")
exutil.AssertPodToBeReady(oc, "hugepages-app", ntoTestNS)
exutil.By("Check the value of /etc/podinfo/hugepages_2M_request, the value expected is 105 ...")
podInfo, err := exutil.RemoteShPod(oc, ntoTestNS, "hugepages-app", "cat", "/etc/podinfo/hugepages_2M_request")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(podInfo).To(o.ContainSubstring("105"))
exutil.By("Check the value of REQUESTS_HUGEPAGES in env on pod ...")
envInfo, err := exutil.RemoteShPodWithBash(oc, ntoTestNS, "hugepages-app", "env | grep REQUESTS_HUGEPAGES")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(envInfo).To(o.ContainSubstring("REQUESTS_HUGEPAGES_2Mi=104857600"))
exutil.By("The right way to delete custom MC and MCP...")
oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-hp-").Execute()
exutil.DeleteMCAndMCPByName(oc, "50-nto-worker-hp", "worker-hp", 480)
})
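// A hedged sketch (not the actual hugepageTunedBoottimeFile) of how a TuneD profile
// can add boot-time hugepages through the [bootloader] plugin, which is what makes
// allocatable hugepages-2Mi appear after the worker-hp pool reboots. 50 pages of 2Mi
// match the 100M observed above; the exact keys and numbers in the real template are
// assumptions and may differ.
package main
import "fmt"
const hugepagesProfileData = `[main]
summary=Boot-time hugepages via the TuneD bootloader plugin
include=openshift-node
[bootloader]
cmdline_openshift_node_hugepages=hugepagesz=2M hugepages=50
`
func main() { fmt.Print(hugepagesProfileData) }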
| ||||||
test case
|
openshift/openshift-tests-private
|
2eb59b41-0c20-4e31-8574-b8f6553bfd74
|
NonPreRelease-Longduration-Author:liqcui-Medium-49439-NTO can start and stop stalld when relying on Tuned '[service]' plugin.[Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("NonPreRelease-Longduration-Author:liqcui-Medium-49439-NTO can start and stop stalld when relying on Tuned '[service]' plugin.[Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
if ManualPickup {
g.Skip("This is the test case that execute mannually in shared cluster ...")
}
//Use the first rhcos worker node as labeled node
tunedNodeName, err := exutil.GetFirstCoreOsWorkerNode(oc)
e2e.Logf("tunedNodeName is [ %v ]", tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
if len(tunedNodeName) == 0 {
g.Skip("Skip Testing on RHEL worker or windows node")
}
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-stalld-").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "openshift-stalld", "-n", ntoNamespace, "--ignore-not-found").Execute()
defer exutil.DebugNodeWithChroot(oc, tunedNodeName, "/usr/bin/throttlectl", "on")
exutil.By("Set off for /usr/bin/throttlectl before enable stalld")
switchThrottlectlOnOff(oc, ntoNamespace, tunedNodeName, "off", 30)
exutil.By("Label the node with node-role.kubernetes.io/worker-stalld=")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-stalld=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create openshift-stalld tuned profile")
exutil.CreateNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", stalldTunedFile, "-p", "STALLD_STATUS=start,enable")
exutil.By("Check openshift-stalld tuned profile should be automatically created")
tunedNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "tuned").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNames).To(o.ContainSubstring("openshift-stalld"))
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check if new NTO profile was applied")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "openshift-stalld")
exutil.By("Check if profile openshift-stalld applied on nodes")
nodeProfileName, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeProfileName).To(o.ContainSubstring("openshift-stalld"))
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check if stalld service is running ...")
stalldStatus, err := exutil.DebugNodeWithChroot(oc, tunedNodeName, "systemctl", "status", "stalld")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(stalldStatus).To(o.ContainSubstring("active (running)"))
exutil.By("Apply openshift-stalld with stop,disable tuned profile")
exutil.ApplyNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", stalldTunedFile, "-p", "STALLD_STATUS=stop,disable")
exutil.By("Check if new NTO profile was applied")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "openshift-stalld")
exutil.By("Check if stalld service is inactive and stopped ...")
stalldStatus, _ = exutil.DebugNodeWithOptionsAndChroot(oc, tunedNodeName, []string{"-q", "--to-namespace", ntoNamespace}, "systemctl", "status", "stalld")
o.Expect(stalldStatus).NotTo(o.BeEmpty())
o.Expect(stalldStatus).To(o.ContainSubstring("inactive (dead)"))
exutil.By("Apply openshift-stalld with start,enable tuned profile")
exutil.ApplyNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", stalldTunedFile, "-p", "STALLD_STATUS=start,enable")
exutil.By("Check if new NTO profile was applied")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "openshift-stalld")
exutil.By("Check if stalld service is running again ...")
stalldStatus, _, err = exutil.DebugNodeRetryWithOptionsAndChrootWithStdErr(oc, tunedNodeName, []string{"-q", "--to-namespace", ntoNamespace}, "systemctl", "status", "stalld")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(stalldStatus).NotTo(o.BeEmpty())
o.Expect(stalldStatus).To(o.ContainSubstring("active (running)"))
})
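// A standalone sketch of the stalld state checks above: query a systemd unit on a node
// through `oc debug node/<name>` and chroot. `systemctl is-active` prints
// "active"/"inactive" and is a lighter-weight probe than parsing `systemctl status`;
// its non-zero exit code for inactive units is deliberately ignored here. The node
// name is a placeholder.
package main
import (
"fmt"
"os/exec"
"strings"
)
// unitState returns the systemd active-state of a unit on the given node.
func unitState(node, unit string) string {
out, _ := exec.Command("oc", "debug", "node/"+node, "--quiet=true", "--",
"chroot", "/host", "systemctl", "is-active", unit).CombinedOutput()
return strings.TrimSpace(string(out))
}
func main() {
fmt.Println("stalld:", unitState("worker-0.example.com", "stalld")) // placeholder node
}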
| ||||||
test case
|
openshift/openshift-tests-private
|
6ad8fefb-5189-4ebc-9963-a953a1138caf
|
ROSA-OSD_CCS-NonHyperShiftHOST-Author:liqcui-Medium-49441-NTO Applying a profile with multiple inheritance where parents include a common ancestor. [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("ROSA-OSD_CCS-NonHyperShiftHOST-Author:liqcui-Medium-49441-NTO Applying a profile with multiple inheritance where parents include a common ancestor. [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
//Try to include two profiles that share the same parent profile "throughput-performance". Examples of such profiles
//are openshift-node --> openshift --> (virtual-guest) --> throughput-performance and the postgresql profile.
//Use the first worker node as labeled node
isSNO := exutil.IsSNOCluster(oc)
//Prefer to choose a worker node from a machineset
if exutil.IsMachineSetExist(oc) && !isSNO {
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
} else {
tunedNodeName, err = exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
}
//Get the tuned pod name on the same node as the labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "tuned.openshift.io/openshift-node-postgresql-").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "openshift-node-postgresql", "-n", ntoNamespace, "--ignore-not-found").Execute()
exutil.By("Label the node with tuned.openshift.io/openshift-node-postgresql=")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "tuned.openshift.io/openshift-node-postgresql=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check postgresql profile /usr/lib/tuned/postgresql/tuned.conf include throughput-performance profile")
postGreSQLProfile, err := exutil.RemoteShPod(oc, ntoNamespace, tunedPodName, "cat", "/usr/lib/tuned/postgresql/tuned.conf")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(postGreSQLProfile).To(o.ContainSubstring("throughput-performance"))
exutil.By("Check postgresql profile /usr/lib/tuned/openshift-node/tuned.conf include openshift profile")
openshiftNodeProfile, err := exutil.RemoteShPod(oc, ntoNamespace, tunedPodName, "cat", "/usr/lib/tuned/openshift-node/tuned.conf")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(openshiftNodeProfile).To(o.ContainSubstring(`include=openshift`))
exutil.By("Check postgresql profile /usr/lib/tuned/openshift/tuned.conf include throughput-performance profile")
openshiftProfile, err := exutil.RemoteShPod(oc, ntoNamespace, tunedPodName, "cat", "/usr/lib/tuned/openshift/tuned.conf")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(openshiftProfile).To(o.ContainSubstring("throughput-performance"))
exutil.By("Create openshift-node-postgresql tuned profile")
exutil.ApplyOperatorResourceByYaml(oc, ntoNamespace, openshiftNodePostgresqlFile)
exutil.By("Check openshift-node-postgresql tuned profile should be automatically created")
tunedNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "tuned").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNames).To(o.ContainSubstring("openshift-node-postgresql"))
exutil.By("Check if new NTO profile was applied")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "openshift-node-postgresql")
exutil.By("Check if profile openshift-node-postgresql applied on nodes")
nodeProfileName, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeProfileName).To(o.ContainSubstring("openshift-node-postgresql"))
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Assert recommended profile (openshift-node-postgresql) matches current configuration in tuned pod log")
assertNTOPodLogsLastLines(oc, ntoNamespace, tunedPodName, "10", 300, `recommended profile \(openshift-node-postgresql\) matches current configuration|static tuning from profile 'openshift-node-postgresql' applied`)
})
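// A hedged sketch (not the actual openshiftNodePostgresqlFile) of the multiple-
// inheritance idea verified above: the custom profile data includes both
// openshift-node and postgresql, which share throughput-performance as a common
// ancestor, and TuneD merges the include chain left to right.
package main
import "fmt"
const openshiftNodePostgresqlData = `[main]
summary=Custom profile inheriting from openshift-node and postgresql
include=openshift-node,postgresql
`
func main() { fmt.Print(openshiftNodePostgresqlData) }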
| ||||||
test case
|
openshift/openshift-tests-private
|
ba1a7e25-0ab8-4296-9f59-59697444fd07
|
NonHyperShiftHOST-Author:liqcui-Medium-49705-Tuned net plugin handle net devices with n/a value for a channel. [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("NonHyperShiftHOST-Author:liqcui-Medium-49705-Tuned net plugin handle net devices with n/a value for a channel. [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed or hosted cluster - skipping test ...")
}
if iaasPlatform == "vsphere" || iaasPlatform == "openstack" || iaasPlatform == "none" || iaasPlatform == "powervs" {
g.Skip("IAAS platform: " + iaasPlatform + " doesn't support cloud provider profile - skipping test ...")
}
isSNO := exutil.IsSNOCluster(oc)
//Prefer to choose a worker node from a machineset
if !isSNO {
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
} else {
tunedNodeName, err = exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
}
//Get the tuned pod name on the same node as the labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
exutil.By("Check default channel for host network adapter, not expected Combined: 1, if so, skip testing ...")
//assertIFChannelQueuesStatus is used for checking if match Combined: 1
//If match <Combined: 1>, skip testing
isMatch := assertIFChannelQueuesStatus(oc, ntoNamespace, tunedNodeName)
if isMatch {
g.Skip("Only one NIC queues or Unsupported NIC - skipping test ...")
}
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("pod", tunedPodName, "-n", ntoNamespace, "node-role.kubernetes.io/netplugin-").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "net-plugin", "-n", ntoNamespace, "--ignore-not-found").Execute()
exutil.By("Label the node with node-role.kubernetes.io/netplugin=")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("pod", tunedPodName, "-n", ntoNamespace, "node-role.kubernetes.io/netplugin=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create net-plugin tuned profile")
exutil.ApplyOperatorResourceByYaml(oc, ntoNamespace, netPluginFile)
exutil.By("Check net-plugin tuned profile should be automatically created")
tunedNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "tuned").Output()
o.Expect(tunedNames).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNames).To(o.ContainSubstring("net-plugin"))
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(output).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Assert tuned.plugins.base: instance net: assigning devices match in tuned pod log")
assertNTOPodLogsLastLines(oc, ntoNamespace, tunedPodName, "180", 300, "tuned.plugins.base: instance net: assigning devices")
exutil.By("Assert active and recommended profile (net-plugin) match in tuned pod log")
assertNTOPodLogsLastLines(oc, ntoNamespace, tunedPodName, "180", 300, `profile 'net-plugin' applied|profile \(net-plugin\) match`)
exutil.By("Check if new NTO profile was applied")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "net-plugin")
exutil.By("Check if profile net-plugin applied on nodes")
nodeProfileName, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(nodeProfileName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeProfileName).To(o.ContainSubstring("net-plugin"))
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(output).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check channel for host network adapter, expected Combined: 1")
o.Expect(assertIFChannelQueuesStatus(oc, ntoNamespace, tunedNodeName)).To(o.BeTrue())
exutil.By("Delete tuned net-plugin and check channel for host network adapater again")
oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "net-plugin", "-n", ntoNamespace, "--ignore-not-found").Execute()
exutil.By("Check if profile openshift-node|openshift-control-plane applied on nodes")
if isSNO {
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "openshift-control-plane")
} else {
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "openshift-node")
}
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(output).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check channel for host network adapter, not expected Combined: 1")
o.Expect(assertIFChannelQueuesStatus(oc, ntoNamespace, tunedNodeName)).To(o.BeFalse())
})
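// A standalone sketch of the channel check behind assertIFChannelQueuesStatus: run
// `ethtool -l <dev>` on the node and look for the "Combined:" lines in the output.
// The node name and the device name are placeholders, not values from the helper.
package main
import (
"fmt"
"os/exec"
"strings"
)
func main() {
out, err := exec.Command("oc", "debug", "node/worker-0.example.com", "--quiet=true", "--",
"chroot", "/host", "ethtool", "-l", "ens5").CombinedOutput() // placeholder node and NIC
if err != nil {
fmt.Println("ethtool failed:", err)
return
}
for _, line := range strings.Split(string(out), "\n") {
if strings.Contains(line, "Combined:") {
fmt.Println(strings.TrimSpace(line))
}
}
}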
| ||||||
test case
|
openshift/openshift-tests-private
|
ab5fe8ef-a652-4f89-89dd-20898bd10711
|
ROSA-OSD_CCS-NonHyperShiftHOST-Author:liqcui-Medium-49617-NTO support cloud-provider specific profiles for NTO/TuneD. [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("ROSA-OSD_CCS-NonHyperShiftHOST-Author:liqcui-Medium-49617-NTO support cloud-provider specific profiles for NTO/TuneD. [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
if iaasPlatform == "none" {
g.Skip("IAAS platform: " + iaasPlatform + " doesn't support cloud provider profile - skipping test ...")
}
isSNO := exutil.IsSNOCluster(oc)
//Prefer to choose a worker node from a machineset
if !isSNO {
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
} else {
tunedNodeName, err = exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
}
//Get the tuned pod name on the same node as the labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
o.Expect(tunedPodName).NotTo(o.BeEmpty())
exutil.By("Get cloud provider name ...")
providerName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("profiles.tuned.openshift.io", tunedNodeName, "-n", ntoNamespace, "-ojsonpath={.spec.config.providerName}").Output()
o.Expect(providerName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "provider-"+providerName, "-n", ntoNamespace, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "provider-abc", "-n", ntoNamespace, "--ignore-not-found").Execute()
providerID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.spec.providerID}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(providerID).NotTo(o.BeEmpty())
o.Expect(providerID).To(o.ContainSubstring(providerName))
exutil.By("Check the value of vm.admin_reserve_kbytes on target nodes, the expected value should be 8192")
sysctlOutput, err := exutil.RemoteShPod(oc, ntoNamespace, tunedPodName, "sysctl", "vm.admin_reserve_kbytes")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(sysctlOutput).NotTo(o.BeEmpty())
o.Expect(sysctlOutput).To(o.ContainSubstring("vm.admin_reserve_kbytes = 8192"))
exutil.By("Apply cloud-provider profile ...")
exutil.ApplyNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", cloudProviderFile, "-p", "PROVIDER_NAME="+providerName)
exutil.By("Check /var/lib/tuned/provider on target nodes")
openshiftProfile, err := exutil.RemoteShPod(oc, ntoNamespace, tunedPodName, "cat", "/var/lib/ocp-tuned/provider")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(openshiftProfile).NotTo(o.BeEmpty())
o.Expect(openshiftProfile).To(o.ContainSubstring(providerName))
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(output).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check tuned for NTO")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "tuned.tuned.openshift.io").Output()
o.Expect(output).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current tuned for NTO: \n%v", output)
exutil.By("Check provider + providerName profile should be automatically created")
tunedNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "tuned").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNames).NotTo(o.BeEmpty())
o.Expect(tunedNames).To(o.ContainSubstring("provider-" + providerName))
exutil.By("Check the value of vm.admin_reserve_kbytes on target nodes, the expected value is 16386")
compareSpecifiedValueByNameOnLabelNodewithRetry(oc, ntoNamespace, tunedNodeName, "vm.admin_reserve_kbytes", "16386")
exutil.By("Remove cloud-provider profile, the value of vm.admin_reserve_kbytes rollback to 8192")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "provider-"+providerName, "-n", ntoNamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check the value of vm.admin_reserve_kbytes on target nodes, the expected value should be 8192")
compareSpecifiedValueByNameOnLabelNodewithRetry(oc, ntoNamespace, tunedNodeName, "vm.admin_reserve_kbytes", "8192")
exutil.By("Apply cloud-provider-abc profile,the abc doesn't belong to any cloud provider ...")
exutil.ApplyNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", cloudProviderFile, "-p", "PROVIDER_NAME=abc")
exutil.By("Check the value of vm.admin_reserve_kbytes on target nodes, the expected value should be no change, still is 8192")
compareSpecifiedValueByNameOnLabelNodewithRetry(oc, ntoNamespace, tunedNodeName, "vm.admin_reserve_kbytes", "8192")
})
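// Illustrative sketch only, not part of the original test: the assertion above relies on
// node .spec.providerID (for example "aws:///us-east-1a/i-0123456789abcdef0") containing
// the Tuned providerName (for example "aws"). A hypothetical helper that derives the
// provider prefix from a providerID string:
package main
import (
"fmt"
"strings"
)
// providerFromProviderID returns the scheme portion of a Kubernetes node providerID,
// which conventionally has the form "<provider>://<provider-specific-id>".
func providerFromProviderID(providerID string) string {
if idx := strings.Index(providerID, "://"); idx > 0 {
return providerID[:idx]
}
return ""
}
func main() {
fmt.Println(providerFromProviderID("aws:///us-east-1a/i-0123456789abcdef0")) // "aws"
}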
| ||||||
test case
|
openshift/openshift-tests-private
|
5183bfe7-ae1d-4e62-8e61-d037ca2ffe92
|
Author:liqcui-Medium-45593-NTO Operator set io_timeout for AWS Nitro instances in correct way.[Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("Author:liqcui-Medium-45593-NTO Operator set io_timeout for AWS Nitro instances in correct way.[Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
// currently test is only supported on AWS
if iaasPlatform == "aws" {
exutil.By("Expected /sys/module/nvme_core/parameters/io_timeout value on each node is: 4294967295")
assertIOTimeOutandMaxRetries(oc, ntoNamespace)
} else {
g.Skip("Test Case 45593 doesn't support on other cloud platform, only support aws - skipping test ...")
}
})
| ||||||
test case
|
openshift/openshift-tests-private
|
3c3990ee-b0d9-4e67-8b11-336d2badb3ac
|
Author:liqcui-Medium-27420-NTO Operator is providing default tuned.[Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("Author:liqcui-Medium-27420-NTO Operator is providing default tuned.[Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
defaultTunedCreateTimeBefore, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("tuned", "default", "-n", ntoNamespace, "-ojsonpath={.metadata.creationTimestamp}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(defaultTunedCreateTimeBefore).NotTo(o.BeEmpty())
exutil.By("Delete the default tuned ...")
oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "default", "-n", ntoNamespace).Execute()
exutil.By("The make sure the tuned default created and ready")
confirmedTunedReady(oc, ntoNamespace, "default", 60)
defaultTunedCreateTimeAfter, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("tuned", "default", "-n", ntoNamespace, "-ojsonpath={.metadata.creationTimestamp}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(defaultTunedCreateTimeAfter).NotTo(o.BeEmpty())
o.Expect(defaultTunedCreateTimeAfter).NotTo(o.ContainSubstring(defaultTunedCreateTimeBefore))
defaultTunedCreateTimeBefore, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("tuned", "default", "-n", ntoNamespace, "-ojsonpath={.metadata.creationTimestamp}").Output()
o.Expect(defaultTunedCreateTimeBefore).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
defaultTunedCreateTimeAfter, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("tuned", "default", "-n", ntoNamespace, "-ojsonpath={.metadata.creationTimestamp}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(defaultTunedCreateTimeAfter).NotTo(o.BeEmpty())
o.Expect(defaultTunedCreateTimeAfter).To(o.ContainSubstring(defaultTunedCreateTimeBefore))
e2e.Logf("defaultTunedCreateTimeBefore is : %v defaultTunedCreateTimeAfter is: %v", defaultTunedCreateTimeBefore, defaultTunedCreateTimeAfter)
})
| ||||||
test case
|
openshift/openshift-tests-private
|
b9829237-64b7-4832-ab20-9056a2c5a75c
|
NonHyperShiftHOST-Author:liqcui-Medium-41552-NTO Operator Report per-node Tuned profile application status[Disruptive].
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("NonHyperShiftHOST-Author:liqcui-Medium-41552-NTO Operator Report per-node Tuned profile application status[Disruptive].", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
isSNO := exutil.IsSNOCluster(oc)
is3Master := exutil.Is3MasterNoDedicatedWorkerNode(oc)
masterNodeName := getFirstMasterNodeName(oc)
defaultMasterProfileName := getDefaultProfileNameOnMaster(oc, masterNodeName)
//NTO provides two default tuned profiles: one is openshift-control-plane, the other is openshift-node
exutil.By("Check the default tuned profile list per nodes")
profileOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("profiles.tuned.openshift.io", "-n", ntoNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profileOutput).NotTo(o.BeEmpty())
if isSNO || is3Master {
o.Expect(profileOutput).To(o.ContainSubstring(defaultMasterProfileName))
} else {
o.Expect(profileOutput).To(o.ContainSubstring("openshift-control-plane"))
o.Expect(profileOutput).To(o.ContainSubstring("openshift-node"))
}
})
| ||||||
test case
|
openshift/openshift-tests-private
|
4c07e39e-40e9-4062-8b81-f0c963e88bd9
|
NonHyperShiftHOST-Author:liqcui-Medium-50052-NTO RHCOS-shipped stalld systemd units should use SCHED_FIFO to run stalld[Disruptive].
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("NonHyperShiftHOST-Author:liqcui-Medium-50052-NTO RHCOS-shipped stalld systemd units should use SCHED_FIFO to run stalld[Disruptive].", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
if iaasPlatform == "vsphere" || iaasPlatform == "none" {
g.Skip("IAAS platform: " + iaasPlatform + " doesn't support cloud provider profile - skipping test ...")
}
isSNO := exutil.IsSNOCluster(oc)
//Prior to choose worker nodes with machineset
if !isSNO {
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
} else {
tunedNodeName, err = exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
}
e2e.Logf("tunedNodeName is [ %v ]", tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
if len(tunedNodeName) == 0 {
g.Skip("Skip Testing on RHEL worker or windows node")
}
//Get the tuned pod name in the same node that labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
o.Expect(tunedPodName).NotTo(o.BeEmpty())
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-stalld-").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "openshift-stalld", "-n", ntoNamespace, "--ignore-not-found").Execute()
defer exutil.DebugNodeRetryWithOptionsAndChroot(oc, tunedNodeName, []string{"-q"}, "/usr/bin/throttlectl", "on")
//Switch off throttlectl to improve the success rate of stalld starting
exutil.By("Set off for /usr/bin/throttlectl before enable stalld")
switchThrottlectlOnOff(oc, ntoNamespace, tunedNodeName, "off", 30)
exutil.By("Label the node with node-role.kubernetes.io/worker-stalld=")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-stalld=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create openshift-stalld tuned profile")
exutil.CreateNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", stalldTunedFile, "-p", "STALLD_STATUS=start,enable")
exutil.By("Check openshift-stalld tuned profile should be automatically created")
tunedNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "tuned").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNames).NotTo(o.BeEmpty())
o.Expect(tunedNames).To(o.ContainSubstring("openshift-stalld"))
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).NotTo(o.BeEmpty())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check if new NTO profile was applied")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "openshift-stalld")
exutil.By("Check if profile openshift-stalld applied on nodes")
nodeProfileName, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeProfileName).NotTo(o.BeEmpty())
o.Expect(nodeProfileName).To(o.ContainSubstring("openshift-stalld"))
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).NotTo(o.BeEmpty())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check if stalld service is running ...")
stalldStatus, _, err := exutil.DebugNodeRetryWithOptionsAndChrootWithStdErr(oc, tunedNodeName, []string{"-q", "--to-namespace=" + ntoNamespace}, "systemctl", "status", "stalld")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(stalldStatus).NotTo(o.BeEmpty())
o.Expect(stalldStatus).To(o.ContainSubstring("active (running)"))
exutil.By("Get stalld PID on labeled node ...")
stalldPIDStatus, _, err := exutil.DebugNodeRetryWithOptionsAndChrootWithStdErr(oc, tunedNodeName, []string{"-q", "--to-namespace=" + ntoNamespace}, "/bin/bash", "-c", "ps -efZ | grep stalld | grep -v grep")
e2e.Logf("stalldPIDStatus is :\n%v", stalldPIDStatus)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(stalldPIDStatus).NotTo(o.BeEmpty())
o.Expect(stalldPIDStatus).NotTo(o.ContainSubstring("unconfined_service_t"))
o.Expect(stalldPIDStatus).To(o.ContainSubstring("-t 20"))
exutil.By("Get stalld PID on labeled node ...")
stalldPID, _, err := exutil.DebugNodeRetryWithOptionsAndChrootWithStdErr(oc, tunedNodeName, []string{"-q", "--to-namespace=" + ntoNamespace}, "/bin/bash", "-c", "ps -efL| grep stalld | grep -v grep | awk '{print $2}'")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(stalldPID).NotTo(o.BeEmpty())
exutil.By("Get status of chrt -p stalld PID on labeled node ...")
chrtStalldPIDOutput, _, err := exutil.DebugNodeRetryWithOptionsAndChrootWithStdErr(oc, tunedNodeName, []string{"-q", "--to-namespace=" + ntoNamespace}, "/bin/bash", "-c", "chrt -ap "+stalldPID)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(chrtStalldPIDOutput).NotTo(o.BeEmpty())
o.Expect(chrtStalldPIDOutput).To(o.ContainSubstring("SCHED_FIFO"))
e2e.Logf("chrtStalldPIDOutput is :\n%v", chrtStalldPIDOutput)
})
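// Illustrative sketch only, not part of the original test: the step above greps
// `chrt -ap <stalld PID>` output for "SCHED_FIFO". A hypothetical helper that performs
// the same check on captured output:
package main
import (
"fmt"
"strings"
)
// reportsSchedFifo reports whether any line of `chrt -ap` output declares the
// SCHED_FIFO scheduling policy.
func reportsSchedFifo(chrtOutput string) bool {
for _, line := range strings.Split(chrtOutput, "\n") {
if strings.Contains(line, "scheduling policy") && strings.Contains(line, "SCHED_FIFO") {
return true
}
}
return false
}
func main() {
sample := "pid 2215's current scheduling policy: SCHED_FIFO\npid 2215's current scheduling priority: 10\n"
fmt.Println(reportsSchedFifo(sample)) // true
}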
| ||||||
test case
|
openshift/openshift-tests-private
|
0040ac83-7c69-4fa9-b7fb-c6ce4d7045c3
|
Longduration-NonPreRelease-Author:liqcui-Medium-51495-NTO PAO Shipped into NTO with basic function verification.[Disruptive][Slow].
|
['"strconv"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("Longduration-NonPreRelease-Author:liqcui-Medium-51495-NTO PAO Shipped into NTO with basic function verification.[Disruptive][Slow].", func() {
var (
paoBaseProfileMCP = exutil.FixturePath("testdata", "psap", "pao", "pao-baseprofile-mcp.yaml")
paoBaseProfile = exutil.FixturePath("testdata", "psap", "pao", "pao-baseprofile.yaml")
paoBaseQoSPod = exutil.FixturePath("testdata", "psap", "pao", "pao-baseqos-pod.yaml")
)
if ManualPickup {
g.Skip("This is the test case that execute mannually in shared cluster ...")
}
// test requires NTO to be installed
isSNO := exutil.IsSNOCluster(oc)
if !isNTO || isSNO {
g.Skip("NTO is not installed or is Single Node Cluster- skipping test ...")
}
skipPAODeploy := skipDeployPAO(oc)
isPAOInstalled = exutil.IsPAOInstalled(oc)
if skipPAODeploy || isPAOInstalled {
e2e.Logf("PAO has been installed and continue to execute test case")
} else {
isPAOInOperatorHub := exutil.IsPAOInOperatorHub(oc)
if !isPAOInOperatorHub {
g.Skip("PAO is not in OperatorHub - skipping test ...")
}
exutil.InstallPAO(oc, paoNamespace)
}
defer exutil.DeleteMCAndMCPByName(oc, "50-nto-worker-pao", "worker-pao", 480)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("performanceprofile", "pao-baseprofile", "--ignore-not-found").Execute()
//Prior to choose worker nodes with machineset
if !isSNO {
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
} else {
tunedNodeName, err = exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
}
//Get how many CPUs are on the specified worker node
exutil.By("Get how many CPU cores are on the labeled worker node")
nodeCPUCores, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.status.capacity.cpu}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeCPUCores).NotTo(o.BeEmpty())
nodeCPUCoresInt, err := strconv.Atoi(nodeCPUCores)
o.Expect(err).NotTo(o.HaveOccurred())
if nodeCPUCoresInt <= 1 {
g.Skip("the worker node don't have enough cpus - skipping test ...")
}
//Get the tuned pod name in the same node that labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
o.Expect(tunedPodName).NotTo(o.BeEmpty())
//Re-delete mcp, mc, performanceprofile and unlabel the node, just in case the test case broke before the cleanup steps
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-pao-").Execute()
exutil.By("Label the node with node-role.kubernetes.io/worker-pao=")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-pao=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// currently test is only supported on AWS, GCP, and Azure
ocpArch, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.status.nodeInfo.architecture}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if (iaasPlatform == "aws" || iaasPlatform == "gcp") && ocpArch == "amd64" {
//Only GCP and AWS support the realtime-kernel
exutil.By("Apply pao-baseprofile performance profile")
exutil.ApplyClusterResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", paoBaseProfile, "-p", "ISENABLED=true")
} else {
exutil.By("Apply pao-baseprofile performance profile")
exutil.ApplyClusterResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", paoBaseProfile, "-p", "ISENABLED=false")
}
exutil.By("Check Performance Profile pao-baseprofile was created automatically")
paoBasePerformanceProfile, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("performanceprofile").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(paoBasePerformanceProfile).NotTo(o.BeEmpty())
o.Expect(paoBasePerformanceProfile).To(o.ContainSubstring("pao-baseprofile"))
exutil.By("Create machine config pool worker-pao")
exutil.ApplyOperatorResourceByYaml(oc, "", paoBaseProfileMCP)
exutil.By("Assert if machine config pool applied for worker nodes")
exutil.AssertIfMCPChangesAppliedByName(oc, "worker-pao", 1200)
exutil.By("Check openshift-node-performance-pao-baseprofile tuned profile should be automatically created")
tunedNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "tuned").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNames).To(o.ContainSubstring("openshift-node-performance-pao-baseprofile"))
exutil.By("Check current profile openshift-node-performance-pao-baseprofile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check if new NTO profile openshift-node-performance-pao-baseprofile was applied")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "openshift-node-performance-pao-baseprofile")
exutil.By("Check if profile openshift-node-performance-pao-baseprofile applied on nodes")
nodeProfileName, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeProfileName).To(o.ContainSubstring("openshift-node-performance-pao-baseprofile"))
exutil.By("Check value of allocatable.hugepages-1Gi in labled node ")
nodeHugePagesOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.status.allocatable.hugepages-1Gi}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeHugePagesOutput).To(o.ContainSubstring("1Gi"))
exutil.By("Check Settings of CPU Manager policy created by PAO in labled node ")
cpuManagerConfOutput, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "chroot", "/host", "/bin/bash", "-c", "cat /etc/kubernetes/kubelet.conf |grep cpuManager").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(cpuManagerConfOutput).NotTo(o.BeEmpty())
o.Expect(cpuManagerConfOutput).To(o.ContainSubstring("cpuManagerPolicy"))
o.Expect(cpuManagerConfOutput).To(o.ContainSubstring("cpuManagerReconcilePeriod"))
e2e.Logf("The settings of CPU Manager Policy on labeled nodes: \n%v", cpuManagerConfOutput)
exutil.By("Check Settings of CPU Manager for reservedSystemCPUs created by PAO in labled node ")
cpuManagerConfOutput, err = oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "chroot", "/host", "/bin/bash", "-c", "cat /etc/kubernetes/kubelet.conf |grep reservedSystemCPUs").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(cpuManagerConfOutput).NotTo(o.BeEmpty())
o.Expect(cpuManagerConfOutput).To(o.ContainSubstring("reservedSystemCPUs"))
e2e.Logf("The settings of CPU Manager reservedSystemCPUs on labeled nodes: \n%v", cpuManagerConfOutput)
exutil.By("Check Settings of Topology Manager for topologyManagerPolicy created by PAO in labled node ")
topologyManagerConfOutput, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "chroot", "/host", "/bin/bash", "-c", "cat /etc/kubernetes/kubelet.conf |grep topologyManagerPolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(topologyManagerConfOutput).NotTo(o.BeEmpty())
o.Expect(topologyManagerConfOutput).To(o.ContainSubstring("topologyManagerPolicy"))
e2e.Logf("The settings of CPU Manager topologyManagerPolicy on labeled nodes: \n%v", topologyManagerConfOutput)
// currently test is only supported on AWS, GCP, and Azure
if (iaasPlatform == "aws" || iaasPlatform == "gcp") && ocpArch == "amd64" {
exutil.By("Check realTime kernel setting that created by PAO in labled node ")
realTimekernalOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-owide").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(realTimekernalOutput).NotTo(o.BeEmpty())
o.Expect(realTimekernalOutput).To(o.Or(o.ContainSubstring("rt")))
} else {
exutil.By("Check realTime kernel setting that created by PAO in labled node ")
realTimekernalOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-owide").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(realTimekernalOutput).NotTo(o.BeEmpty())
o.Expect(realTimekernalOutput).NotTo(o.Or(o.ContainSubstring("rt")))
}
exutil.By("Check runtimeClass setting that created by PAO ... ")
runtimeClassOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("performanceprofile", "pao-baseprofile", "-ojsonpath={.status.runtimeClass}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(runtimeClassOutput).NotTo(o.BeEmpty())
o.Expect(runtimeClassOutput).To(o.ContainSubstring("performance-pao-baseprofile"))
e2e.Logf("The settings of runtimeClass on labeled nodes: \n%v", runtimeClassOutput)
exutil.By("Check allocable system resouce on labeled node ... ")
allocableResource, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.status.allocatable}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(allocableResource).NotTo(o.BeEmpty())
e2e.Logf("The allocable system resouce on labeled node: \n%v", allocableResource)
oc.SetupProject()
ntoTestNS := oc.Namespace()
//Create a guaranteed-pod application pod
exutil.By("Create a guaranteed-pod pod into temp namespace")
exutil.ApplyOperatorResourceByYaml(oc, ntoTestNS, paoBaseQoSPod)
//Check if guaranteed-pod is ready
exutil.By("Check if a guaranteed-pod pod is ready ...")
exutil.AssertPodToBeReady(oc, "guaranteed-pod", ntoTestNS)
exutil.By("Check the cpu bind to isolation CPU zone for a guaranteed-pod")
cpuManagerStateOutput, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "chroot", "/host", "/bin/bash", "-c", "cat /var/lib/kubelet/cpu_manager_state").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(cpuManagerStateOutput).NotTo(o.BeEmpty())
o.Expect(cpuManagerStateOutput).To(o.ContainSubstring("guaranteed-pod"))
e2e.Logf("The settings of CPU Manager cpuManagerState on labeled nodes: \n%v", cpuManagerStateOutput)
//The custom mc and mcp must be deleted in the correct sequence: unlabel first so the labeled node returns to the worker mcp, then delete the mc and mcp,
//otherwise the mcp will stay in a degraded state and affect other test cases that use mcp
exutil.By("Delete custom MC and MCP by following correct logic ...")
oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-pao-").Execute()
exutil.DeleteMCAndMCPByName(oc, "50-nto-worker-pao", "worker-pao", 480)
})
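// Illustrative sketch only, not part of the original test: the step above cats
// /var/lib/kubelet/cpu_manager_state and only greps for the pod name. The file is JSON;
// the schema below reflects what the static CPU Manager policy typically writes, but
// treat the exact field names as an assumption rather than a stable API.
package main
import (
"encoding/json"
"fmt"
)
type cpuManagerState struct {
PolicyName    string                       `json:"policyName"`
DefaultCPUSet string                       `json:"defaultCpuSet"`
Entries       map[string]map[string]string `json:"entries"`
}
func main() {
sample := `{"policyName":"static","defaultCpuSet":"0,2-3","entries":{"pod-uid":{"guaranteed-pod":"1"}},"checksum":1234}`
var state cpuManagerState
if err := json.Unmarshal([]byte(sample), &state); err != nil {
panic(err)
}
// CPUs exclusively assigned to each container, keyed by pod UID and container name.
fmt.Println(state.PolicyName, state.Entries["pod-uid"]["guaranteed-pod"]) // static 1
}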
| |||||
test case
|
openshift/openshift-tests-private
|
7aae30c1-8185-4f65-a86f-c10495cf2f4c
|
NonHyperShiftHOST-Author:liqcui-Medium-53053-NTO will automatically delete profile with unknown/stuck state. [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("NonHyperShiftHOST-Author:liqcui-Medium-53053-NTO will automatically delete profile with unknown/stuck state. [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
if iaasPlatform == "none" {
g.Skip("IAAS platform: " + iaasPlatform + " doesn't support cloud provider profile - skipping test ...")
}
var (
ntoUnknownProfile = exutil.FixturePath("testdata", "psap", "nto", "nto-unknown-profile.yaml")
)
//Get NTO operator pod name
ntoOperatorPod, err := getNTOPodName(oc, ntoNamespace)
o.Expect(ntoOperatorPod).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
isSNO := exutil.IsSNOCluster(oc)
//Prior to choose worker nodes with machineset
if !isSNO {
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
} else {
tunedNodeName, err = exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("Get cloud provider name ...")
providerName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("profiles.tuned.openshift.io", tunedNodeName, "-n", ntoNamespace, "-ojsonpath={.spec.config.providerName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(providerName).NotTo(o.BeEmpty())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("profiles.tuned.openshift.io", "worker-does-not-exist-openshift-node", "-n", ntoNamespace, "--ignore-not-found").Execute()
exutil.By("Apply worker-does-not-exist-openshift-node profile ...")
exutil.ApplyNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", ntoUnknownProfile, "-p", "PROVIDER_NAME="+providerName)
exutil.By("The profile worker-does-not-exist-openshift-node will be deleted automatically once created.")
tunedNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(tunedNames).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNames).NotTo(o.ContainSubstring("worker-does-not-exist-openshift-node"))
exutil.By("Assert NTO logs to match key words Node 'worker-does-not-exist-openshift-node' not found")
assertNTOPodLogsLastLines(oc, ntoNamespace, ntoOperatorPod, "4", 120, " Node \"worker-does-not-exist-openshift-node\" not found")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
9bda0e4a-20e2-46c4-8ae8-9534a0cbe334
|
NonPreRelease-Longduration-Author:liqcui-Medium-59884-NTO Cgroup Blacklist multiple regular expression. [Disruptive]
|
['"strconv"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("NonPreRelease-Longduration-Author:liqcui-Medium-59884-NTO Cgroup Blacklist multiple regular expression. [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
oc.SetupProject()
ntoTestNS := oc.Namespace()
//Get the last Linux worker node to run the test case on
tunedNodeName, err := exutil.GetLastLinuxWorkerNode(oc)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
//First choice to use [tests] image, the image mirrored by default in disconnected cluster
//if don't have [tests] image in some environment, we can use hello-openshift as image
//usually test imagestream shipped in all ocp and mirror the image in disconnected cluster by default
// AppImageName := exutil.GetImagestreamImageName(oc, "tests")
// if len(AppImageName) == 0 {
AppImageName := "quay.io/openshifttest/nginx-alpine@sha256:04f316442d48ba60e3ea0b5a67eb89b0b667abf1c198a3d0056ca748736336a0"
// }
//Get how many CPUs are on the specified worker node
exutil.By("Get how many CPU cores are on the labeled worker node")
nodeCPUCores, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.status.capacity.cpu}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeCPUCores).NotTo(o.BeEmpty())
nodeCPUCoresInt, err := strconv.Atoi(nodeCPUCores)
o.Expect(err).NotTo(o.HaveOccurred())
if nodeCPUCoresInt <= 1 {
g.Skip("the worker node don't have enough cpus - skipping test ...")
}
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
o.Expect(tunedPodName).NotTo(o.BeEmpty())
exutil.By("Remove custom profile (if not already removed) and remove node label")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "-n", ntoNamespace, "cgroup-scheduler-blacklist").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "tuned-scheduler-node-").Execute()
exutil.By("Label the specified linux node with label tuned-scheduler-node")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "tuned-scheduler-node=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// setting cgroup_ps_blacklist=/kubepods\.slice/kubepods-burstable\.slice/;/system\.slice/
// a process that belongs to /kubepods\.slice/kubepods-burstable\.slice/ or /system\.slice/ can consume the whole cpuset;
// the expected Cpus_allowed_list in /proc/$PID/status should be 0-N
// a process that does not belong to /kubepods\.slice/kubepods-burstable\.slice/ or /system\.slice/ cannot consume the whole cpuset;
// the expected Cpus_allowed_list in /proc/$PID/status should be 0 or 0,2-N
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "-n", ntoTestNS, "app-web", "--ignore-not-found").Execute()
exutil.By("Create pod that deletect the value of kernel.pid_max ")
exutil.ApplyNsResourceFromTemplate(oc, ntoTestNS, "--ignore-unknown-parameters=true", "-f", cgroupSchedulerBestEffortPod, "-p", "IMAGE_NAME="+AppImageName)
//Check if nginx pod is ready
exutil.By("Check if best effort pod is ready...")
exutil.AssertPodToBeReady(oc, "app-web", ntoTestNS)
exutil.By("Create NTO custom tuned profile cgroup-scheduler-blacklist")
exutil.ApplyNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", cgroupSchedulerBacklist, "-p", "PROFILE_NAME=cgroup-scheduler-blacklist", `CGROUP_BLACKLIST=/kubepods\.slice/kubepods-burstable\.slice/;/system\.slice/`)
exutil.By("Check if NTO custom tuned profile cgroup-scheduler-blacklist was applied")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "cgroup-scheduler-blacklist")
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
// The expected Cpus_allowed_list in /proc/$PID/status should be 0-N
exutil.By("Verified the cpu allow list in cgroup black list for tuned ...")
o.Expect(assertProcessInCgroupSchedulerBlacklist(oc, tunedNodeName, ntoNamespace, "tuned", nodeCPUCoresInt)).To(o.Equal(true))
// The expected Cpus_allowed_list in /proc/$PID/status should be 0-N
exutil.By("Verified the cpu allow list in cgroup black list for chronyd ...")
o.Expect(assertProcessInCgroupSchedulerBlacklist(oc, tunedNodeName, ntoNamespace, "chronyd", nodeCPUCoresInt)).To(o.Equal(true))
// The expected Cpus_allowed_list in /proc/$PID/status should be 0 or 0,2-N
exutil.By("Verified the cpu allow list in cgroup black list for nginx process...")
o.Expect(assertProcessNOTInCgroupSchedulerBlacklist(oc, tunedNodeName, ntoNamespace, "nginx| tail -1", nodeCPUCoresInt)).To(o.Equal(true))
})
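// Illustrative sketch only, not part of the original test: the blacklist assertions above
// boil down to reading Cpus_allowed_list from /proc/<PID>/status. A hypothetical helper
// that extracts that field from captured status output:
package main
import (
"fmt"
"strings"
)
// cpusAllowedList returns the value of the Cpus_allowed_list field from the contents
// of a /proc/<pid>/status file, or "" if the field is missing.
func cpusAllowedList(status string) string {
for _, line := range strings.Split(status, "\n") {
if strings.HasPrefix(line, "Cpus_allowed_list:") {
return strings.TrimSpace(strings.TrimPrefix(line, "Cpus_allowed_list:"))
}
}
return ""
}
func main() {
sample := "Name:\tnginx\nCpus_allowed_list:\t0,2-3\n"
// A best-effort process outside the blacklist is expected to be restricted, e.g. "0,2-3";
// a blacklisted process (tuned, chronyd) is expected to show the full range, e.g. "0-3".
fmt.Println(cpusAllowedList(sample)) // "0,2-3"
}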
| |||||
test case
|
openshift/openshift-tests-private
|
8a424c91-f7c4-4c8e-9e87-eb7092fd13ee
|
Longduration-NonPreRelease-Author:liqcui-Medium-60743-NTO No race to update MC when nodes with different number of CPUs are in the same MCP. [Disruptive] [Slow]
|
['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("Longduration-NonPreRelease-Author:liqcui-Medium-60743-NTO No race to update MC when nodes with different number of CPUs are in the same MCP. [Disruptive] [Slow]", func() {
// test requires NTO to be installed
isSNO := exutil.IsSNOCluster(oc)
if !isNTO || isSNO {
g.Skip("NTO is not installed or is Single Node Cluster- skipping test ...")
}
haveMachineSet := exutil.IsMachineSetExist(oc)
if !haveMachineSet {
g.Skip("No machineset found, skipping test ...")
}
// currently test is only supported on AWS, GCP, Azure, ibmcloud, alibabacloud
supportPlatforms := []string{"aws", "gcp", "azure", "ibmcloud", "alibabacloud"}
if !exutil.ImplStringArrayContains(supportPlatforms, iaasPlatform) {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
//Prior to choose worker nodes with machineset
if !isSNO {
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
} else {
tunedNodeName, err = exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
}
//Get NTO Operator Pod Name
ntoOperatorPodName := getNTOOperatorPodName(oc, ntoNamespace)
//Re-delete mcp, mc, performanceprofile and unlabel the node, just in case the test case broke before the cleanup steps
defer exutil.DeleteMCAndMCPByName(oc, "50-nto-worker-diffcpus", "worker-diffcpus", 480)
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-diffcpus-").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "openshift-bootcmdline-cpu", "-n", ntoNamespace, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("machineset", "ocp-psap-qe-diffcpus", "-n", "openshift-machine-api", "--ignore-not-found").Execute()
exutil.By("Create openshift-bootcmdline-cpu tuned profile")
exutil.ApplyOperatorResourceByYaml(oc, ntoNamespace, nodeDiffCPUsTunedBootFile)
exutil.By("Create machine config pool")
exutil.ApplyClusterResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", nodeDiffCPUsMCPFile, "-p", "MCP_NAME=worker-diffcpus")
exutil.By("Label the last node with node-role.kubernetes.io/worker-diffcpus=")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-diffcpus=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create a new machineset with different instance type.")
newMachinesetInstanceType := exutil.SpecifyMachinesetWithDifferentInstanceType(oc)
e2e.Logf("4 newMachinesetInstanceType is %v, ", newMachinesetInstanceType)
o.Expect(newMachinesetInstanceType).NotTo(o.BeEmpty())
exutil.CreateMachinesetbyInstanceType(oc, "ocp-psap-qe-diffcpus", newMachinesetInstanceType)
exutil.By("Wait for new node is ready when machineset created")
//1 means replicas=1
clusterinfra.WaitForMachinesRunning(oc, 1, "ocp-psap-qe-diffcpus")
exutil.By("Label the second node with node-role.kubernetes.io/worker-diffcpus=")
secondTunedNodeName := exutil.GetNodeNameByMachineset(oc, "ocp-psap-qe-diffcpus")
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", secondTunedNodeName, "node-role.kubernetes.io/worker-diffcpus-", "--overwrite").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", secondTunedNodeName, "node-role.kubernetes.io/worker-diffcpus=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Assert if the status of adding the two worker node into worker-diffcpus mcp, mcp applied")
exutil.AssertIfMCPChangesAppliedByName(oc, "worker-diffcpus", 480)
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Assert if openshift-bootcmdline-cpu profile was applied ...")
//Verify if the new profile is applied
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "openshift-bootcmdline-cpu")
profileCheck, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profileCheck).To(o.Equal("openshift-bootcmdline-cpu"))
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
assertNTOPodLogsLastLines(oc, ntoNamespace, ntoOperatorPodName, "25", 180, "Nodes in MCP worker-diffcpus agree on bootcmdline: cpus=")
//Commented out due to a known issue, until it is fixed
exutil.By("Assert if cmdline was applied in machineconfig...")
AssertTunedAppliedMC(oc, "nto-worker-diffcpus", "cpus=")
exutil.By("Assert if cmdline was applied in labled node...")
o.Expect(AssertTunedAppliedToNode(oc, tunedNodeName, "cpus=")).To(o.Equal(true))
exutil.By("<Profiles with bootcmdline conflict> warn message will show in oc get co/node-tuning")
assertCoStatusWithKeywords(oc, "Profiles with bootcmdline conflict")
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
//Verify if the <Profiles with bootcmdline conflict> warn message disappears after removing the custom tuned profile
exutil.By("Delete openshift-bootcmdline-cpu tuned in labled node...")
oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "openshift-bootcmdline-cpu", "-n", ntoNamespace, "--ignore-not-found").Execute()
//The custom mc and mcp must be deleted in the correct sequence: unlabel first so the labeled node returns to the worker mcp, then delete the mc and mcp,
//otherwise the mcp will stay in a degraded state and affect other test cases that use mcp
exutil.By("Removing custom MC and MCP from mcp worker-diffcpus...")
oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-diffcpus-").Execute()
//remove node from mcp worker-diffcpus
//To reduce time, delete the machineset instead of unlabeling the secondTunedNodeName node
oc.AsAdmin().WithoutNamespace().Run("delete").Args("machineset", "ocp-psap-qe-diffcpus", "-n", "openshift-machine-api", "--ignore-not-found").Execute()
oc.AsAdmin().WithoutNamespace().Run("label").Args("node", secondTunedNodeName, "node-role.kubernetes.io/worker-diffcpus-").Execute()
exutil.By("Assert if first worker node return to worker mcp")
exutil.AssertIfMCPChangesAppliedByName(oc, "worker", 480)
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).NotTo(o.BeEmpty())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("<Profiles with bootcmdline conflict> warn message will disappear after removing worker node from mcp worker-diffcpus")
assertCONodeTuningStatusWithoutWARNWithRetry(oc, 180, "Profiles with bootcmdline conflict")
exutil.By("Assert if isolcpus was applied in labled node...")
o.Expect(AssertTunedAppliedToNode(oc, tunedNodeName, "cpus=")).To(o.Equal(false))
})
| |||||
test case
|
openshift/openshift-tests-private
|
56de140a-77e5-43bd-90e1-e0981113b52e
|
Author:liqcui-Medium-63223-NTO support tuning sysctl and kernel bools that applied to all nodes of nodepool-level settings in hypershift.
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("Author:liqcui-Medium-63223-NTO support tuning sysctl and kernel bools that applied to all nodes of nodepool-level settings in hypershift.", func() {
//This is a ROSA HCP pre-defined case that only checks results; the ROSA team creates the NTO tuned profile when the ROSA HCP cluster is created, so the Disruptive label is removed
//Only execute on ROSA hosted cluster
isROSA := isROSAHostedCluster(oc)
if !isROSA {
g.Skip("It's not ROSA hosted cluster - skipping test ...")
}
//For the ROSA environment, we are unable to access the management cluster, so as discussed with the ROSA team,
//the ROSA team creates a pre-defined configmap and applies it to the specified nodepool with a hardcoded profile name.
//NTO will only check whether all settings are applied to the worker nodes on the hosted cluster.
exutil.By("Check if the tuned hc-nodepool-vmdratio is created in hosted cluster nodepool")
tunedNameList, err := oc.AsAdmin().Run("get").Args("tuned", "-n", ntoNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNameList).NotTo(o.BeEmpty())
e2e.Logf("The list of tuned tunedNameList is: \n%v", tunedNameList)
o.Expect(tunedNameList).To(o.And(o.ContainSubstring("hc-nodepool-vmdratio"),
o.ContainSubstring("tuned-hugepages")))
appliedProfileList, err := oc.AsAdmin().Run("get").Args("profiles.tuned.openshift.io", "-n", ntoNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(appliedProfileList).NotTo(o.BeEmpty())
o.Expect(appliedProfileList).To(o.And(o.ContainSubstring("hc-nodepool-vmdratio"),
o.ContainSubstring("openshift-node-hugepages")))
exutil.By("Get the node name that applied to the profile hc-nodepool-vmdratio")
tunedNodeNameStdOut, err := oc.AsAdmin().Run("get").Args("profiles.tuned.openshift.io", "-n", ntoNamespace, `-ojsonpath='{.items[?(@..status.tunedProfile=="hc-nodepool-vmdratio")].metadata.name}'`).Output()
tunedNodeName := strings.Trim(tunedNodeNameStdOut, "'")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
exutil.By("Assert the value of sysctl vm.dirty_ratio, the expecte value should be 55")
debugNodeStdout, err := oc.AsAdmin().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "chroot", "/host", "sysctl", "vm.dirty_ratio").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The value of sysctl vm.dirty_ratio on node %v is: \n%v\n", tunedNodeName, debugNodeStdout)
o.Expect(debugNodeStdout).To(o.ContainSubstring("vm.dirty_ratio = 55"))
exutil.By("Get the node name that applied to the profile openshift-node-hugepages")
tunedNodeNameStdOut, err = oc.AsAdmin().Run("get").Args("profiles.tuned.openshift.io", "-n", ntoNamespace, `-ojsonpath='{.items[?(@..status.tunedProfile=="openshift-node-hugepages")].metadata.name}'`).Output()
tunedNodeName = strings.Trim(tunedNodeNameStdOut, "'")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
exutil.By("Assert the value of cat /proc/cmdline, the expecte value should be hugepagesz=2M hugepages=50")
debugNodeStdout, err = oc.AsAdmin().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "chroot", "/host", "cat", "/proc/cmdline").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The value of /proc/cmdline on node %v is: \n%v\n", tunedNodeName, debugNodeStdout)
o.Expect(debugNodeStdout).To(o.ContainSubstring("hugepagesz=2M hugepages=50"))
})
| |||||
test case
|
openshift/openshift-tests-private
|
5a8e63bf-5a01-4327-af0c-1be10d1746ec
|
ROSA-NonHyperShiftHOST-Author:sahshah-Medium-64908-NTO Expose tuned socket interface.[Disruptive]
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("ROSA-NonHyperShiftHOST-Author:sahshah-Medium-64908-NTO Expose tuned socket interface.[Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
exutil.By("Pick one worker node to label")
tunedNodeName, err := exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
//Clean up resources
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "-n", ntoNamespace, "tuning-maxpid").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-tuning-").Execute()
//Label the node with node-role.kubernetes.io/worker-tuning
exutil.By("Label the node with node-role.kubernetes.io/worker-tuning=")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-tuning=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
//Get the tuned pod name in the same node that labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
//Apply new profile that match label node-role.kubernetes.io/worker-tuning=
exutil.By("Create tuning-maxpid profile")
exutil.ApplyOperatorResourceByYaml(oc, ntoNamespace, tuningMaxPidFile)
//NTO provides default tuned resources, one of which is named default
exutil.By("Check the default tuned list, expected tuning-maxpid")
allTuneds, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("tuned", "-n", ntoNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(allTuneds).To(o.ContainSubstring("tuning-maxpid"))
exutil.By("Check if new profile tuning-maxpid applied to labeled node")
//Verify if the new profile is applied
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "tuning-maxpid")
profileCheck, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(profileCheck).To(o.Equal("tuning-maxpid"))
exutil.By("Get current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check the custom profile as expected by debugging the node ")
printfString := fmt.Sprintf(`printf '{"jsonrpc": "2.0", "method": "active_profile", "id": 1}' | nc -U /run/tuned/tuned.sock`)
printfStringStdOut, err := exutil.RemoteShPodWithBash(oc, ntoNamespace, tunedPodName, printfString)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(printfStringStdOut).NotTo(o.BeEmpty())
o.Expect(printfStringStdOut).To(o.ContainSubstring("tuning-maxpid"))
e2e.Logf("printfStringStdOut is :\n%v", printfStringStdOut)
})
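// Illustrative sketch only, not part of the original test: the step above pipes a JSON-RPC
// request into `nc -U /run/tuned/tuned.sock` from inside the tuned pod. The same request
// could be sent from Go roughly as below; the socket path and payload come from the test,
// the rest is a hypothetical sketch.
package main
import (
"fmt"
"io"
"net"
"time"
)
func main() {
conn, err := net.DialTimeout("unix", "/run/tuned/tuned.sock", 5*time.Second)
if err != nil {
fmt.Println("dial error:", err)
return
}
defer conn.Close()
// Same payload the test case builds with printf.
request := `{"jsonrpc": "2.0", "method": "active_profile", "id": 1}`
if _, err := conn.Write([]byte(request)); err != nil {
fmt.Println("write error:", err)
return
}
_ = conn.SetReadDeadline(time.Now().Add(5 * time.Second))
reply, _ := io.ReadAll(conn)
fmt.Println(string(reply)) // expected to contain the active profile name, e.g. tuning-maxpid
}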
| |||||
test case
|
openshift/openshift-tests-private
|
e70743eb-6a31-454c-8f38-5e92ca7de595
|
ROSA-NonHyperShiftHOST-Author:liqcui-Medium-65371-NTO TuneD prevent from reverting node level profiles on termination [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("ROSA-NonHyperShiftHOST-Author:liqcui-Medium-65371-NTO TuneD prevent from reverting node level profiles on termination [Disruptive]", func() {
// test requires NTO to be installed
if !isNTO {
g.Skip("NTO is not installed - skipping test ...")
}
//Choose one worker node as the labeled node
var (
tunedNodeName string
err error
)
isSNO := exutil.IsSNOCluster(oc)
if !isSNO {
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
} else {
tunedNodeName, err = exutil.GetFirstLinuxWorkerNode(oc)
o.Expect(tunedNodeName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
}
//Get the tuned pod name in the same node that labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
oc.SetupProject()
ntoTestNS := oc.Namespace()
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-tuning-").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "tuning-pidmax", "-n", ntoNamespace, "--ignore-not-found").Execute()
ntoRes := ntoResource{
name: "tuning-pidmax",
namespace: ntoNamespace,
template: ntoTunedPidMax,
sysctlparm: "kernel.pid_max",
sysctlvalue: "181818",
}
exutil.By("Label the node with node-role.kubernetes.io/worker-tuning=")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-tuning=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create tuning-pidmax profile")
exutil.ApplyOperatorResourceByYaml(oc, ntoNamespace, ntoTunedPidMax)
exutil.By("Create tuning-pidmax profile tuning-pidmax applied to nodes")
ntoRes.assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "tuning-pidmax", "True")
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
AppImageName := exutil.GetImagestreamImageName(oc, "tests")
clusterVersion, _, err := exutil.GetClusterVersion(oc)
e2e.Logf("Current clusterVersion is [ %v ]", clusterVersion)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(clusterVersion).NotTo(o.BeEmpty())
exutil.By("Create pod that deletect the value of kernel.pid_max ")
exutil.ApplyNsResourceFromTemplate(oc, ntoTestNS, "--ignore-unknown-parameters=true", "-f", podSysctlFile, "-p", "IMAGE_NAME="+AppImageName, "RUNASNONROOT=true")
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
//Check if sysctlpod pod is ready
exutil.AssertPodToBeReady(oc, "sysctlpod", ntoTestNS)
exutil.By("Get the sysctlpod status")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoTestNS, "pods").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The status of pod sysctlpod: \n%v", output)
exutil.By("Check the the value of kernel.pid_max in the pod sysctlpod, the expected value should be kernel.pid_max = 181818")
podLogStdout, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("sysctlpod", "--tail=1", "-n", ntoTestNS).Output()
e2e.Logf("Logs of sysctlpod before delete tuned pod is [ %v ]", podLogStdout)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(podLogStdout).NotTo(o.BeEmpty())
o.Expect(podLogStdout).To(o.ContainSubstring("kernel.pid_max = 181818"))
exutil.By("Delete tuned pod on the labeled node, and make sure the kernel.pid_max don't revert to origin value")
o.Expect(oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", tunedPodName, "-n", ntoNamespace).Execute()).NotTo(o.HaveOccurred())
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check tuned pod status after delete tuned pod")
//Get the tuned pod name in the same node that labeled node
tunedPodName = getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
//Check if the re-created tuned pod is ready
exutil.AssertPodToBeReady(oc, tunedPodName, ntoNamespace)
exutil.By("Check the the value of kernel.pid_max in the pod sysctlpod again, the expected value still be kernel.pid_max = 181818")
podLogStdout, err = oc.AsAdmin().WithoutNamespace().Run("logs").Args("sysctlpod", "--tail=2", "-n", ntoTestNS).Output()
e2e.Logf("Logs of sysctlpod after delete tuned pod is [ %v ]", podLogStdout)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(podLogStdout).NotTo(o.BeEmpty())
o.Expect(podLogStdout).To(o.ContainSubstring("kernel.pid_max = 181818"))
o.Expect(podLogStdout).NotTo(o.ContainSubstring("kernel.pid_max not equal 181818"))
})
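// Illustrative sketch only, not part of the original test: the sysctlpod above keeps logging
// kernel.pid_max so the test can prove the tuned value (181818) survives a tuned pod restart.
// Reading the same sysctl directly from procfs in Go; a sketch, not the pod's implementation.
package main
import (
"fmt"
"os"
"strings"
)
func main() {
raw, err := os.ReadFile("/proc/sys/kernel/pid_max")
if err != nil {
fmt.Println("read error:", err)
return
}
fmt.Println("kernel.pid_max =", strings.TrimSpace(string(raw)))
}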
| ||||||
test case
|
openshift/openshift-tests-private
|
129ec81b-6084-42fd-87ac-682d48f86dd2
|
Longduration-NonPreRelease-PreChkUpgrade-Author:liqcui-Medium-49618-TELCO N-1 - Pre Check for PAO shipped with NTO to support upgrade.[Telco][Disruptive][Slow].
|
['"strconv"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("Longduration-NonPreRelease-PreChkUpgrade-Author:liqcui-Medium-49618-TELCO N-1 - Pre Check for PAO shipped with NTO to support upgrade.[Telco][Disruptive][Slow].", func() {
var (
paoBaseProfileMCP = exutil.FixturePath("testdata", "psap", "pao", "pao-baseprofile-mcp.yaml")
paoBaseProfile = exutil.FixturePath("testdata", "psap", "pao", "pao-baseprofile.yaml")
)
isSNO := exutil.IsSNOCluster(oc)
if !isNTO || isSNO {
g.Skip("NTO is not installed or is Single Node Cluster- skipping test ...")
}
// currently test is only supported on AWS, GCP, Azure, ibmcloud, alibabacloud
supportPlatforms := []string{"aws", "gcp", "azure", "ibmcloud", "alibabacloud"}
if !exutil.ImplStringArrayContains(supportPlatforms, iaasPlatform) || isSNO {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
totalLinuxWorkerNode := exutil.CountLinuxWorkerNodeNumByOS(oc)
totalLinuxWorkerNodes := strconv.Itoa(totalLinuxWorkerNode)
if totalLinuxWorkerNode < 3 {
g.Skip("The total linux worker node is " + totalLinuxWorkerNodes + ". The OCP do not have enough worker node, skip it.")
}
tunedNodeName := choseOneWorkerNodeToRunCase(oc, 0)
//Get how many CPUs are on the specified worker node
exutil.By("Get the number of CPU cores on the labeled worker node")
nodeCPUCores, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.status.capacity.cpu}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeCPUCores).NotTo(o.BeEmpty())
nodeCPUCoresInt, err := strconv.Atoi(nodeCPUCores)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current cpus cores of worker node is %v", nodeCPUCoresInt)
if nodeCPUCoresInt < 4 {
g.Skip("the worker node doesn't have enough cpus - skipping test ...")
}
//Get the tuned pod name in the same node that labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
o.Expect(tunedPodName).NotTo(o.BeEmpty())
exutil.By("Label the node with node-role.kubernetes.io/worker-pao=")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-pao=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create machine config pool worker-pao")
exutil.ApplyOperatorResourceByYaml(oc, "", paoBaseProfileMCP)
exutil.AssertIfMCPChangesAppliedByName(oc, "worker-pao", 300)
// currently test is only supported on AWS, GCP, and Azure
ocpArch, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.status.nodeInfo.architecture}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if (iaasPlatform == "aws" || iaasPlatform == "gcp") && ocpArch == "amd64" {
//Only GCP and AWS support the realtime-kernel
exutil.By("Apply pao-baseprofile performance profile")
exutil.ApplyClusterResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", paoBaseProfile, "-p", "ISENABLED=true")
} else {
exutil.By("Apply pao-baseprofile performance profile")
exutil.ApplyClusterResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", paoBaseProfile, "-p", "ISENABLED=false")
}
exutil.By("Check Performance Profile pao-baseprofile was created automatically")
paoBasePerformanceProfile, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("performanceprofile").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(paoBasePerformanceProfile).NotTo(o.BeEmpty())
o.Expect(paoBasePerformanceProfile).To(o.ContainSubstring("pao-baseprofile"))
exutil.By("Assert if machine config pool applied to worker nodes that label with worker-pao")
exutil.AssertIfMCPChangesAppliedByName(oc, "worker-pao", 1800)
exutil.AssertIfMCPChangesAppliedByName(oc, "worker", 300)
exutil.AssertIfMCPChangesAppliedByName(oc, "master", 720)
exutil.By("Check openshift-node-performance-pao-baseprofile tuned profile should be automatically created")
tunedNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "tuned").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNames).To(o.ContainSubstring("openshift-node-performance-pao-baseprofile"))
exutil.By("Check current profile openshift-node-performance-pao-baseprofile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check if new NTO profile openshift-node-performance-pao-baseprofile was applied")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "openshift-node-performance-pao-baseprofile")
exutil.By("Check if profile openshift-node-performance-pao-baseprofile applied on nodes")
nodeProfileName, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeProfileName).To(o.ContainSubstring("openshift-node-performance-pao-baseprofile"))
exutil.By("Check value of allocatable.hugepages-1Gi in labled node ")
nodeHugePagesOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.status.allocatable.hugepages-1Gi}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeHugePagesOutput).To(o.ContainSubstring("1Gi"))
exutil.By("Check Settings of CPU Manager policy created by PAO in labled node ")
cpuManagerConfOutput, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "chroot", "/host", "/bin/bash", "-c", "cat /etc/kubernetes/kubelet.conf |grep cpuManager").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(cpuManagerConfOutput).NotTo(o.BeEmpty())
o.Expect(cpuManagerConfOutput).To(o.ContainSubstring("cpuManagerPolicy"))
o.Expect(cpuManagerConfOutput).To(o.ContainSubstring("cpuManagerReconcilePeriod"))
e2e.Logf("The settings of CPU Manager Policy on labeled nodes: \n%v", cpuManagerConfOutput)
exutil.By("Check Settings of CPU Manager for reservedSystemCPUs created by PAO in labled node ")
cpuManagerConfOutput, err = oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "chroot", "/host", "/bin/bash", "-c", "cat /etc/kubernetes/kubelet.conf |grep reservedSystemCPUs").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(cpuManagerConfOutput).NotTo(o.BeEmpty())
o.Expect(cpuManagerConfOutput).To(o.ContainSubstring("reservedSystemCPUs"))
e2e.Logf("The settings of CPU Manager reservedSystemCPUs on labeled nodes: \n%v", cpuManagerConfOutput)
exutil.By("Check Settings of Topology Manager for topologyManagerPolicy created by PAO in labled node ")
topologyManagerConfOutput, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "chroot", "/host", "/bin/bash", "-c", "cat /etc/kubernetes/kubelet.conf |grep topologyManagerPolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(topologyManagerConfOutput).NotTo(o.BeEmpty())
o.Expect(topologyManagerConfOutput).To(o.ContainSubstring("topologyManagerPolicy"))
e2e.Logf("The settings of CPU Manager topologyManagerPolicy on labeled nodes: \n%v", topologyManagerConfOutput)
// currently test is only supported on AWS, GCP, and Azure
if (iaasPlatform == "aws" || iaasPlatform == "gcp") && ocpArch == "amd64" {
exutil.By("Check realTime kernel setting that created by PAO in labled node ")
realTimekernalOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-owide").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(realTimekernalOutput).NotTo(o.BeEmpty())
o.Expect(realTimekernalOutput).To(o.Or(o.ContainSubstring("rt")))
} else {
exutil.By("Check realTime kernel setting that created by PAO in labled node ")
realTimekernalOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-owide").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(realTimekernalOutput).NotTo(o.BeEmpty())
o.Expect(realTimekernalOutput).NotTo(o.Or(o.ContainSubstring("rt")))
}
exutil.By("Check runtimeClass setting that created by PAO ... ")
runtimeClassOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("performanceprofile", "pao-baseprofile", "-ojsonpath={.status.runtimeClass}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(runtimeClassOutput).NotTo(o.BeEmpty())
o.Expect(runtimeClassOutput).To(o.ContainSubstring("performance-pao-baseprofile"))
e2e.Logf("The settings of runtimeClass on labeled nodes: \n%v", runtimeClassOutput)
exutil.By("Check Kernel boot settings passed into /proc/cmdline in labled node ")
kernelCMDLineStdout, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "chroot", "/host", "cat", "/proc/cmdline").Output()
e2e.Logf("The settings of Kernel boot passed into /proc/cmdline on labeled nodes: \n%v", kernelCMDLineStdout)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(kernelCMDLineStdout).NotTo(o.BeEmpty())
o.Expect(kernelCMDLineStdout).To(o.ContainSubstring("tsc=reliable"))
o.Expect(kernelCMDLineStdout).To(o.ContainSubstring("isolcpus="))
o.Expect(kernelCMDLineStdout).To(o.ContainSubstring("hugepagesz=1G"))
//o.Expect(kernelCMDLineStdout).To(o.ContainSubstring("nosmt"))
// The nosmt check is disabled to improve the success rate on nodes with limited CPU cores;
// re-enable it manually when enough CPU cores are available.
})
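// Editor's illustrative sketch (not part of the test above): the /proc/cmdline assertions
// could be factored into a small helper. This is only a sketch; it assumes the oc/o/exutil
// utilities already imported at the top of nto.go, and the helper name is hypothetical.
func assertKernelArgsOnNode(oc *exutil.CLI, namespace, nodeName string, requiredArgs []string) {
// Read the kernel boot arguments from the node via a debug pod.
cmdline, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", namespace, "--quiet=true", "node/"+nodeName, "--", "chroot", "/host", "cat", "/proc/cmdline").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(cmdline).NotTo(o.BeEmpty())
// Every expected argument (e.g. "tsc=reliable", "isolcpus=", "hugepagesz=1G")
// must appear somewhere on the kernel command line.
for _, arg := range requiredArgs {
o.Expect(cmdline).To(o.ContainSubstring(arg))
}
}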
| |||||
test case
|
openshift/openshift-tests-private
|
6fdfb9c3-1a40-420f-bd21-d039cde421d5
|
Longduration-NonPreRelease-PstChkUpgrade-Author:liqcui-Medium-49618-TELCO N-1 - Post Check for PAO shipped with NTO to support upgrade.[Telco][Disruptive][Slow].
|
['"strconv"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("Longduration-NonPreRelease-PstChkUpgrade-Author:liqcui-Medium-49618-TELCO N-1 - Post Check for PAO shipped with NTO to support upgrade.[Telco][Disruptive][Slow].", func() {
if !isNTO {
g.Skip("NTO is not installed or is Single Node Cluster- skipping test ...")
}
isSNO := exutil.IsSNOCluster(oc)
// currently test is only supported on AWS, GCP, Azure, ibmcloud, alibabacloud
supportPlatforms := []string{"aws", "gcp", "azure", "ibmcloud", "alibabacloud"}
if !exutil.ImplStringArrayContains(supportPlatforms, iaasPlatform) || isSNO {
g.Skip("IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
totalLinuxWorkerNode := exutil.CountLinuxWorkerNodeNumByOS(oc)
totalLinuxWorkerNodes := strconv.Itoa(totalLinuxWorkerNode)
if totalLinuxWorkerNode < 3 {
g.Skip("The total linux worker node is " + totalLinuxWorkerNodes + ". The OCP do not have enough worker node, skip it.")
}
tunedNodeName, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-l", "node-role.kubernetes.io/worker-pao", "-ojsonpath={.items[*].metadata.name}").Output()
if len(tunedNodeName) == 0 {
g.Skip("No labeled node was found, skipping testing ...")
} else {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-pao-").Execute()
}
defer exutil.DeleteMCAndMCPByName(oc, "50-nto-worker-pao", "worker-pao", 480)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("performanceprofile", "pao-baseprofile", "--ignore-not-found").Execute()
exutil.By("Check If Performance Profile pao-baseprofile and cloud-provider exist during Post Check Phase")
paoBasePerformanceProfile, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("performanceprofile").Output()
if !strings.Contains(paoBasePerformanceProfile, "pao-baseprofile") {
g.Skip("No Performancerofile found skipping test ...")
}
//Get the tuned pod name on the labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
o.Expect(tunedPodName).NotTo(o.BeEmpty())
exutil.By("Assert if machine config pool applied for worker nodes")
exutil.AssertIfMCPChangesAppliedByName(oc, "worker-pao", 1200)
exutil.By("Check openshift-node-performance-pao-baseprofile tuned profile should be automatically created")
tunedNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "tuned").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNames).To(o.ContainSubstring("openshift-node-performance-pao-baseprofile"))
exutil.By("Check current profile openshift-node-performance-pao-baseprofile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check if new NTO profile openshift-node-performance-pao-baseprofile was applied")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "openshift-node-performance-pao-baseprofile")
exutil.By("Check if profile openshift-node-performance-pao-baseprofile applied on nodes")
nodeProfileName, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeProfileName).To(o.ContainSubstring("openshift-node-performance-pao-baseprofile"))
exutil.By("Check value of allocatable.hugepages-1Gi in labled node ")
nodeHugePagesOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.status.allocatable.hugepages-1Gi}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeHugePagesOutput).To(o.ContainSubstring("1Gi"))
exutil.By("Check Settings of CPU Manager policy created by PAO in labled node ")
cpuManagerConfOutput, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "chroot", "/host", "/bin/bash", "-c", "cat /etc/kubernetes/kubelet.conf |grep cpuManager").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(cpuManagerConfOutput).NotTo(o.BeEmpty())
o.Expect(cpuManagerConfOutput).To(o.ContainSubstring("cpuManagerPolicy"))
o.Expect(cpuManagerConfOutput).To(o.ContainSubstring("cpuManagerReconcilePeriod"))
e2e.Logf("The settings of CPU Manager Policy on labeled nodes: \n%v", cpuManagerConfOutput)
exutil.By("Check Settings of CPU Manager for reservedSystemCPUs created by PAO in labled node ")
cpuManagerConfOutput, err = oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "chroot", "/host", "/bin/bash", "-c", "cat /etc/kubernetes/kubelet.conf |grep reservedSystemCPUs").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(cpuManagerConfOutput).NotTo(o.BeEmpty())
o.Expect(cpuManagerConfOutput).To(o.ContainSubstring("reservedSystemCPUs"))
e2e.Logf("The settings of CPU Manager reservedSystemCPUs on labeled nodes: \n%v", cpuManagerConfOutput)
exutil.By("Check Settings of Topology Manager for topologyManagerPolicy created by PAO in labled node ")
topologyManagerConfOutput, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "chroot", "/host", "/bin/bash", "-c", "cat /etc/kubernetes/kubelet.conf |grep topologyManagerPolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(topologyManagerConfOutput).NotTo(o.BeEmpty())
o.Expect(topologyManagerConfOutput).To(o.ContainSubstring("topologyManagerPolicy"))
e2e.Logf("The settings of CPU Manager topologyManagerPolicy on labeled nodes: \n%v", topologyManagerConfOutput)
// currently test is only supported on AWS, GCP, and Azure
ocpArch, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.status.nodeInfo.architecture}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if (iaasPlatform == "aws" || iaasPlatform == "gcp") && ocpArch == "amd64" {
exutil.By("Check realTime kernel setting that created by PAO in labled node ")
realTimekernalOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-owide").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(realTimekernalOutput).NotTo(o.BeEmpty())
o.Expect(realTimekernalOutput).To(o.Or(o.ContainSubstring("rt")))
} else {
exutil.By("Check realTime kernel setting that created by PAO in labled node ")
realTimekernalOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-owide").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(realTimekernalOutput).NotTo(o.BeEmpty())
o.Expect(realTimekernalOutput).NotTo(o.Or(o.ContainSubstring("rt")))
}
exutil.By("Check runtimeClass setting that created by PAO ... ")
runtimeClassOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("performanceprofile", "pao-baseprofile", "-ojsonpath={.status.runtimeClass}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(runtimeClassOutput).NotTo(o.BeEmpty())
o.Expect(runtimeClassOutput).To(o.ContainSubstring("performance-pao-baseprofile"))
e2e.Logf("The settings of runtimeClass on labeled nodes: \n%v", runtimeClassOutput)
exutil.By("Check Kernel boot settings passed into /proc/cmdline in labled node ")
kernelCMDLineStdout, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "chroot", "/host", "cat", "/proc/cmdline").Output()
e2e.Logf("The settings of Kernel boot passed into /proc/cmdline on labeled nodes: \n%v", kernelCMDLineStdout)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(kernelCMDLineStdout).NotTo(o.BeEmpty())
o.Expect(kernelCMDLineStdout).To(o.ContainSubstring("tsc=reliable"))
o.Expect(kernelCMDLineStdout).To(o.ContainSubstring("isolcpus="))
o.Expect(kernelCMDLineStdout).To(o.ContainSubstring("hugepagesz=1G"))
//o.Expect(kernelCMDLineStdout).To(o.ContainSubstring("nosmt"))
// The nosmt check is disabled to improve the success rate on nodes with limited CPU cores;
// re-enable it manually when enough CPU cores are available.
//The custom MC and MCP must be deleted in the correct sequence: unlabel the node first so it returns to the worker MCP, then delete the MC and MCP.
//Otherwise the MCP stays in a degraded state, which affects other test cases that use the MCP.
exutil.By("Delete custom MC and MCP by following correct logic ...")
oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-pao-").Execute()
exutil.DeleteMCAndMCPByName(oc, "50-nto-worker-pao", "worker-pao", 480)
exutil.AssertIfMCPChangesAppliedByName(oc, "worker", 600)
})
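// Editor's illustrative sketch (not part of the test above): the teardown order the comment
// describes — unlabel the node first so it rejoins the worker MCP, then delete the custom
// MC/MCP — could be captured in one hypothetical helper built only from utilities already
// used in this file.
func teardownWorkerPAO(oc *exutil.CLI, nodeName string) {
// 1. Remove the worker-pao role label so the node drains back to the worker pool.
oc.AsAdmin().WithoutNamespace().Run("label").Args("node", nodeName, "node-role.kubernetes.io/worker-pao-").Execute()
// 2. Delete the custom machine config and machine config pool.
exutil.DeleteMCAndMCPByName(oc, "50-nto-worker-pao", "worker-pao", 480)
// 3. Wait for the worker pool to settle; deleting in the wrong order can leave the
//    MCP degraded and break later test cases.
exutil.AssertIfMCPChangesAppliedByName(oc, "worker", 600)
}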
| |||||
test case
|
openshift/openshift-tests-private
|
912a80cb-f94d-4c62-ba5a-a76190f3d597
|
NonPreRelease-PreChkUpgrade-Author:liqcui-Medium-21995-Pre Check for basic NTO function to Upgrade OCP Cluster[Disruptive].
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("NonPreRelease-PreChkUpgrade-Author:liqcui-Medium-21995-Pre Check for basic NTO function to Upgrade OCP Cluster[Disruptive].", func() {
// currently test is only supported on AWS, GCP, Azure, ibmcloud, alibabacloud
supportPlatforms := []string{"aws", "gcp", "azure", "ibmcloud", "alibabacloud"}
if !exutil.ImplStringArrayContains(supportPlatforms, iaasPlatform) || !isNTO {
g.Skip("NTO is not installed or IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
tunedNodeName := choseOneWorkerNodeToRunCase(oc, 1)
paoNodeName, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-l", "node-role.kubernetes.io/worker-pao", "-ojsonpath={.items[*].metadata.name}").Output()
if len(tunedNodeName) == 0 || tunedNodeName == paoNodeName {
g.Skip("No suitable worker node was found in : " + iaasPlatform + " - skipping test ...")
}
exutil.By("Label the node with node-role.kubernetes.io/worker-tuning=")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-tuning=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
//Get the tuned pod name on the labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
o.Expect(tunedPodName).NotTo(o.BeEmpty())
ntoRes := ntoResource{
name: "tuning-pidmax",
namespace: ntoNamespace,
template: ntoSysctlTemplate,
sysctlparm: "kernel.pid_max",
sysctlvalue: "282828",
label: "node-role.kubernetes.io/worker-tuning",
}
exutil.By("Create tuning-pidmax profile")
ntoRes.applyNTOTunedProfile(oc)
exutil.By("Create tuning-pidmax profile tuning-pidmax applied to nodes")
ntoRes.assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "tuning-pidmax", "True")
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Compare if the value kernel.pid_max in on labeled node, should be 282828")
compareSpecifiedValueByNameOnLabelNodewithRetry(oc, ntoNamespace, tunedNodeName, "kernel.pid_max", "282828")
exutil.By("Get cloud provider name ...")
providerName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("profiles.tuned.openshift.io", tunedNodeName, "-n", ntoNamespace, "-ojsonpath={.spec.config.providerName}").Output()
o.Expect(providerName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
providerID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.spec.providerID}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(providerID).NotTo(o.BeEmpty())
o.Expect(providerID).To(o.ContainSubstring(providerName))
exutil.By("Apply cloud-provider profile ...")
exutil.ApplyNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", cloudProviderFile, "-p", "PROVIDER_NAME="+providerName)
exutil.By("Check provider + providerName profile should be automatically created")
tunedNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "tuned").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNames).NotTo(o.BeEmpty())
o.Expect(tunedNames).To(o.ContainSubstring("provider-" + providerName))
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check the value of vm.admin_reserve_kbytes on target nodes, the expected value is 16386")
compareSpecifiedValueByNameOnLabelNodewithRetry(oc, ntoNamespace, tunedNodeName, "vm.admin_reserve_kbytes", "16386")
})
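// Editor's illustrative sketch (not part of the test above): a minimal retry loop for reading
// a sysctl value on a node, similar in spirit to compareSpecifiedValueByNameOnLabelNodewithRetry.
// It assumes the "strings" and "time" packages plus the test utilities already imported in this
// file; the helper name and retry counts are hypothetical.
func sysctlValueEventuallyEquals(oc *exutil.CLI, namespace, nodeName, key, expected string) bool {
for i := 0; i < 10; i++ {
// "sysctl -n <key>" prints only the value, e.g. "282828" for kernel.pid_max.
value, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", namespace, "--quiet=true", "node/"+nodeName, "--", "chroot", "/host", "sysctl", "-n", key).Output()
if err == nil && strings.TrimSpace(value) == expected {
return true
}
time.Sleep(15 * time.Second)
}
return false
}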
| ||||||
test case
|
openshift/openshift-tests-private
|
6436edad-906f-493e-b203-25d837e60d38
|
NonPreRelease-PstChkUpgrade-Author:liqcui-Medium-21995-Post Check for basic NTO function to Upgrade OCP Cluster[Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("NonPreRelease-PstChkUpgrade-Author:liqcui-Medium-21995-Post Check for basic NTO function to Upgrade OCP Cluster[Disruptive]", func() {
// currently test is only supported on AWS, GCP, Azure, ibmcloud, alibabacloud
supportPlatforms := []string{"aws", "gcp", "azure", "ibmcloud", "alibabacloud"}
if !exutil.ImplStringArrayContains(supportPlatforms, iaasPlatform) || !isNTO {
g.Skip("NTO is not installed or IAAS platform: " + iaasPlatform + " is not automated yet - skipping test ...")
}
tunedNodeName, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-l", "node-role.kubernetes.io/worker-tuning", "-ojsonpath={.items[*].metadata.name}").Output()
if len(tunedNodeName) == 0 {
g.Skip("No suitable worker node was found in : " + iaasPlatform + " - skipping test ...")
}
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-tuning-").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "tuning-pidmax", "-n", ntoNamespace, "--ignore-not-found").Execute()
//Get the tuned pod name on the labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
o.Expect(tunedPodName).NotTo(o.BeEmpty())
exutil.By("Get cloud provider name ...")
providerName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("profiles.tuned.openshift.io", tunedNodeName, "-n", ntoNamespace, "-ojsonpath={.spec.config.providerName}").Output()
o.Expect(providerName).NotTo(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "provider-"+providerName, "-n", ntoNamespace, "--ignore-not-found").Execute()
ntoRes := ntoResource{
name: "tuning-pidmax",
namespace: ntoNamespace,
template: ntoSysctlTemplate,
sysctlparm: "kernel.pid_max",
sysctlvalue: "282828",
label: "node-role.kubernetes.io/worker-tuning",
}
exutil.By("Create tuning-pidmax profile and apply it to nodes")
ntoRes.assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "tuning-pidmax", "True")
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Compare if the value kernel.pid_max in on labeled node, should be 282828")
compareSpecifiedValueByNameOnLabelNodewithRetry(oc, ntoNamespace, tunedNodeName, "kernel.pid_max", "282828")
providerID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.spec.providerID}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(providerID).NotTo(o.BeEmpty())
o.Expect(providerID).To(o.ContainSubstring(providerName))
exutil.By("Apply cloud-provider profile ...")
exutil.ApplyNsResourceFromTemplate(oc, ntoNamespace, "--ignore-unknown-parameters=true", "-f", cloudProviderFile, "-p", "PROVIDER_NAME="+providerName)
exutil.By("Check provider + providerName profile should be automatically created")
tunedNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "tuned").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNames).NotTo(o.BeEmpty())
o.Expect(tunedNames).To(o.ContainSubstring("provider-" + providerName))
exutil.By("Check current profile for each node")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check the value of vm.admin_reserve_kbytes on target nodes, the expected value is 16386")
compareSpecifiedValueByNameOnLabelNodewithRetry(oc, ntoNamespace, tunedNodeName, "vm.admin_reserve_kbytes", "16386")
//Clean nto resource after upgrade
oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-tuning-").Execute()
oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "tuning-pidmax", "-n", ntoNamespace, "--ignore-not-found").Execute()
oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "provider-"+providerName, "-n", ntoNamespace, "--ignore-not-found").Execute()
})
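// Editor's illustrative sketch (not part of the test above): the post-upgrade cleanup at the end
// of this case is idempotent because every delete uses --ignore-not-found and the label removal
// tolerates an already-unlabeled node. A hypothetical helper form, reusing only commands already
// present in this case:
func cleanupTuningResources(oc *exutil.CLI, namespace, nodeName, providerName string) {
oc.AsAdmin().WithoutNamespace().Run("label").Args("node", nodeName, "node-role.kubernetes.io/worker-tuning-").Execute()
oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "tuning-pidmax", "-n", namespace, "--ignore-not-found").Execute()
oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "provider-"+providerName, "-n", namespace, "--ignore-not-found").Execute()
}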
| ||||||
test case
|
openshift/openshift-tests-private
|
5f3e3407-1c25-4e94-a8da-ad616c5bef8e
|
Author:liqcui-Medium-74507-NTO openshift-node-performance-uuid have the same priority warning keeps printing[Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("Author:liqcui-Medium-74507-NTO openshift-node-performance-uuid have the same priority warning keeps printing[Disruptive]", func() {
isSNO := exutil.IsSNOCluster(oc)
var firstNodeName string
var secondNodeName string
if !isNTO || isSNO {
g.Skip("NTO is not installed or is Single Node Cluster- skipping test ...")
}
machinesetCount := getTotalLinuxMachinesetNum(oc)
e2e.Logf("total linux machineset count is %v", machinesetCount)
if machinesetCount > 1 {
firstNodeName = choseOneWorkerNodeToRunCase(oc, 0)
secondNodeName = choseOneWorkerNodeToRunCase(oc, 1)
} else {
firstNodeName = choseOneWorkerNodeNotByMachineset(oc, 0)
secondNodeName = choseOneWorkerNodeNotByMachineset(oc, 1)
}
firstNodeLabel := exutil.GetNodeListByLabel(oc, "node-role.kubernetes.io/worker-tuning")
secondNodeLabel := exutil.GetNodeListByLabel(oc, "node-role.kubernetes.io/worker-priority18")
if len(firstNodeLabel) == 0 {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", firstNodeName, "node-role.kubernetes.io/worker-tuning-").Execute()
}
if len(secondNodeLabel) == 0 {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", secondNodeName, "node-role.kubernetes.io/worker-priority18-").Execute()
}
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "tuning-pidmax", "-n", ntoNamespace, "--ignore-not-found").Execute()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "tuning-dirtyratio", "-n", ntoNamespace, "--ignore-not-found").Execute()
//Get the NTO operator pod name
ntoOperatorPodName := getNTOOperatorPodName(oc, ntoNamespace)
o.Expect(ntoOperatorPodName).NotTo(o.BeEmpty())
exutil.By("Pickup two worker nodes to label node to worker-tuning and worker-priority18 ...")
if len(firstNodeLabel) == 0 {
oc.AsAdmin().WithoutNamespace().Run("label").Args("node", firstNodeName, "node-role.kubernetes.io/worker-tuning=").Execute()
}
if len(secondNodeLabel) == 0 {
oc.AsAdmin().WithoutNamespace().Run("label").Args("node", secondNodeName, "node-role.kubernetes.io/worker-priority18=").Execute()
}
firstNTORes := ntoResource{
name: "tuning-pidmax",
namespace: ntoNamespace,
template: ntoSysctlTemplate,
sysctlparm: "kernel.pid_max",
sysctlvalue: "282828",
label: "node-role.kubernetes.io/worker-tuning",
}
secondNTORes := ntoResource{
name: "tuning-dirtyratio",
namespace: ntoNamespace,
template: ntoSysctlTemplate,
sysctlparm: "vm.dirty_ratio",
sysctlvalue: "56",
label: "node-role.kubernetes.io/worker-priority18",
}
exutil.By("Create tuning-pidmax profile")
firstNTORes.applyNTOTunedProfile(oc)
exutil.By("Create tuning-dirtyratio profile")
secondNTORes.applyNTOTunedProfile(oc)
exutil.By("Create tuning-pidmax profile and apply it to nodes")
firstNTORes.assertIfTunedProfileApplied(oc, ntoNamespace, firstNodeName, "tuning-pidmax", "True")
exutil.By("Create tuning-dirtyratio profile and apply it to nodes")
secondNTORes.assertIfTunedProfileApplied(oc, ntoNamespace, secondNodeName, "tuning-dirtyratio", "True")
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Compare if the value kernel.pid_max in on labeled node, should be 282828")
compareSpecifiedValueByNameOnLabelNodewithRetry(oc, ntoNamespace, firstNodeName, "kernel.pid_max", "282828")
exutil.By("Compare if the value kernel.pid_max in on labeled node, should be 282828")
compareSpecifiedValueByNameOnLabelNodewithRetry(oc, ntoNamespace, secondNodeName, "vm.dirty_ratio", "56")
exutil.By("Assert the log contains recommended profile (nf-conntrack-max) matches current configuratio ")
ntoOperatorPodLogs, _ := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", ntoNamespace, ntoOperatorPodName, "--tail=50").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(ntoOperatorPodLogs).NotTo(o.BeEmpty())
o.Expect(ntoOperatorPodLogs).NotTo(o.ContainSubstring("same priority"))
})
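// Editor's illustrative sketch (not part of the test above): scanning the operator log for an
// unwanted warning can be made explicit with strings.Contains, which is handy when the check
// needs to run inside a retry loop. Assumes the "strings" package plus the utilities already
// imported in this file; the helper name is hypothetical.
func operatorLogHasWarning(oc *exutil.CLI, namespace, podName, warning string) bool {
logs, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", namespace, podName, "--tail=50").Output()
if err != nil {
return false
}
// For this case the "same priority" warning must never appear in the tail of the log.
return strings.Contains(logs, warning)
}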
| ||||||
test case
|
openshift/openshift-tests-private
|
1e0cc28e-1fe7-4fa2-8d25-358a7b8b8254
|
Author:liqcui-Longduration-NonPreRelease-Medium-75555-NTO Tuned pod should starts before workload pods on reboot[Disruptive][Slow].
|
['"strconv"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("Author:liqcui-Longduration-NonPreRelease-Medium-75555-NTO Tuned pod should starts before workload pods on reboot[Disruptive][Slow].", func() {
var (
paoBaseProfileMCP = exutil.FixturePath("testdata", "psap", "pao", "pao-baseprofile-mcp.yaml")
paoBaseProfile = exutil.FixturePath("testdata", "psap", "pao", "pao-baseprofile.yaml")
)
// test requires NTO to be installed
isSNO := exutil.IsSNOCluster(oc)
if !isNTO || isSNO {
g.Skip("NTO is not installed or is Single Node Cluster- skipping test ...")
}
skipPAODeploy := skipDeployPAO(oc)
isPAOInstalled = exutil.IsPAOInstalled(oc)
if skipPAODeploy || isPAOInstalled {
e2e.Logf("PAO has been installed and continue to execute test case")
} else {
isPAOInOperatorHub := exutil.IsPAOInOperatorHub(oc)
if !isPAOInOperatorHub {
g.Skip("PAO is not in OperatorHub - skipping test ...")
}
exutil.InstallPAO(oc, paoNamespace)
}
//Prefer to choose worker nodes managed by a machineset
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
//Get how many cpus on the specified worker node
exutil.By("Get how many cpus cores on the labeled worker node")
nodeCPUCores, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.status.capacity.cpu}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeCPUCores).NotTo(o.BeEmpty())
nodeCPUCoresInt, err := strconv.Atoi(nodeCPUCores)
o.Expect(err).NotTo(o.HaveOccurred())
if nodeCPUCoresInt <= 1 {
g.Skip("the worker node don't have enough cpus - skipping test ...")
}
//Get the tuned pod name on the labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
o.Expect(tunedPodName).NotTo(o.BeEmpty())
//Re-delete the MCP, MC, and performance profile, and unlabel the node, in case the test case broke before the cleanup steps
defer func() {
exutil.DeleteMCAndMCPByName(oc, "50-nto-worker-pao", "worker-pao", 480)
oc.AsAdmin().WithoutNamespace().Run("delete").Args("performanceprofile", "pao-baseprofile", "--ignore-not-found").Execute()
}()
labeledNode := exutil.GetNodeListByLabel(oc, "node-role.kubernetes.io/worker-pao")
if len(labeledNode) == 0 {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-pao-").Execute()
exutil.By("Label the node with node-role.kubernetes.io/worker-pao=")
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-pao=", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
// currently test is only supported on AWS, GCP, and Azure
ocpArch, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.status.nodeInfo.architecture}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if (iaasPlatform == "aws" || iaasPlatform == "gcp") && ocpArch == "amd64" {
//Only GCP and AWS support realtime-kernel
exutil.By("Apply pao-baseprofile performance profile")
exutil.ApplyClusterResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", paoBaseProfile, "-p", "ISENABLED=true")
} else {
exutil.By("Apply pao-baseprofile performance profile")
exutil.ApplyClusterResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", paoBaseProfile, "-p", "ISENABLED=false")
}
exutil.By("Check Performance Profile pao-baseprofile was created automatically")
paoBasePerformanceProfile, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("performanceprofile").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(paoBasePerformanceProfile).NotTo(o.BeEmpty())
o.Expect(paoBasePerformanceProfile).To(o.ContainSubstring("pao-baseprofile"))
exutil.By("Create machine config pool worker-pao")
exutil.ApplyOperatorResourceByYaml(oc, "", paoBaseProfileMCP)
exutil.By("Assert if machine config pool applied for worker nodes")
exutil.AssertIfMCPChangesAppliedByName(oc, "worker-pao", 1200)
exutil.By("Check openshift-node-performance-pao-baseprofile tuned profile should be automatically created")
tunedNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "tuned").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(tunedNames).To(o.ContainSubstring("openshift-node-performance-pao-baseprofile"))
exutil.By("Check current profile openshift-node-performance-pao-baseprofile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Check if new NTO profile openshift-node-performance-pao-baseprofile was applied")
assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "openshift-node-performance-pao-baseprofile")
exutil.By("Check if profile openshift-node-performance-pao-baseprofile applied on nodes")
nodeProfileName, err := getTunedProfile(oc, ntoNamespace, tunedNodeName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeProfileName).To(o.ContainSubstring("openshift-node-performance-pao-baseprofile"))
//$ systemctl status ocp-tuned-one-shot.service
// ocp-tuned-one-shot.service - TuneD service from NTO image
// ..
// Active: inactive (dead) since Thu 2024-06-20 14:29:32 UTC; 5min ago
// Notice that the one-shot tuned service started and finished before kubelet.
//systemctl status returns an error when ocp-tuned-one-shot.service is inactive, so an error from that command is expected here.
exutil.By("Check if end time of ocp-tuned-one-shot.service prior to startup time of kubelet service")
//supported property name
// 0.InactiveExitTimestampMonotonic
// 1.ExecMainStartTimestampMonotonic
// 2.ActiveEnterTimestampMonotonic
// 3.StateChangeTimestampMonotonic
// 4.ActiveExitTimestampMonotonic
// 5.InactiveEnterTimestampMonotonic
// 6.ConditionTimestampMonotonic
// 7.AssertTimestampMonotonic
inactiveExitTimestampMonotonicOfOCPTunedOneShotService := exutil.ShowSystemctlPropertyValueOfServiceUnitByName(oc, tunedNodeName, ntoNamespace, "ocp-tuned-one-shot.service", "InactiveExitTimestampMonotonic")
ocpTunedOneShotServiceStatusInactiveExitTimestamp := exutil.GetSystemctlServiceUnitTimestampByPropertyNameWithMonotonic(inactiveExitTimestampMonotonicOfOCPTunedOneShotService)
execMainStartTimestampMonotonicOfKubelet := exutil.ShowSystemctlPropertyValueOfServiceUnitByName(oc, tunedNodeName, ntoNamespace, "kubelet.service", "ExecMainStartTimestampMonotonic")
kubeletServiceStatusExecMainStartTimestamp := exutil.GetSystemctlServiceUnitTimestampByPropertyNameWithMonotonic(execMainStartTimestampMonotonicOfKubelet)
e2e.Logf("ocpTunedOneShotServiceStatusInactiveExitTimestamp is: %v, kubeletServiceStatusActiveEnterTimestamp is: %v", ocpTunedOneShotServiceStatusInactiveExitTimestamp, kubeletServiceStatusExecMainStartTimestamp)
o.Expect(kubeletServiceStatusExecMainStartTimestamp).To(o.BeNumerically(">", ocpTunedOneShotServiceStatusInactiveExitTimestamp))
})
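// Editor's illustrative sketch (not part of the test above): the ordering check relies on systemd
// monotonic timestamps, which `systemctl show -p <Property> <unit>` prints as "Property=<microseconds>".
// A minimal parser for that raw output format, assuming the "fmt", "strconv", and "strings" packages;
// the function name is hypothetical and the exact output shape of the exutil helper may differ.
func parseMonotonicTimestamp(propertyOutput string) (uint64, error) {
// Example input: "InactiveExitTimestampMonotonic=123456789"
parts := strings.SplitN(strings.TrimSpace(propertyOutput), "=", 2)
if len(parts) != 2 {
return 0, fmt.Errorf("unexpected systemctl output: %q", propertyOutput)
}
return strconv.ParseUint(parts[1], 10, 64)
}
// Comparing the two parsed values then expresses "tuned finished before kubelet started":
// kubelet ExecMainStartTimestampMonotonic > ocp-tuned-one-shot InactiveExitTimestampMonotonic.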
| |||||
test case
|
openshift/openshift-tests-private
|
4a302146-3192-4abd-b2df-ee12cb70ff86
|
Author:liqcui-Longduration-NonPreRelease-Medium-75435-NTO deferred feature with annotation deferred update[Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("Author:liqcui-Longduration-NonPreRelease-Medium-75435-NTO deferred feature with annotation deferred update[Disruptive]", func() {
isSNO := exutil.IsSNOCluster(oc)
if !isNTO || isSNO {
g.Skip("NTO is not installed or is Single Node Cluster- skipping test ...")
}
machinesetCount := getTotalLinuxMachinesetNum(oc)
e2e.Logf("total linux machineset count is %v", machinesetCount)
if machinesetCount > 1 {
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
} else {
tunedNodeName = choseOneWorkerNodeNotByMachineset(oc, 0)
}
labeledNode := exutil.GetNodeListByLabel(oc, "deferred-update")
if len(labeledNode) == 0 {
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "deferred-update-").Execute()
}
defer func() {
oc.AsAdmin().WithoutNamespace().Run("delete").Args("tuned", "deferred-update-profile", "-n", ntoNamespace, "--ignore-not-found").Execute()
compareSpecifiedValueByNameOnLabelNodewithRetry(oc, ntoNamespace, tunedNodeName, "kernel.shmmni", "4096")
}()
//Get the tuned pod name on the labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
o.Expect(tunedPodName).NotTo(o.BeEmpty())
exutil.By("Pickup one worker nodes to label node to deferred-update ...")
if len(labeledNode) == 0 {
oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "deferred-update=").Execute()
}
defferedNTORes := ntoResource{
name: "deferred-update-profile",
namespace: ntoNamespace,
template: ntoDefered,
sysctlparm: "kernel.shmmni",
sysctlvalue: "8192",
label: "deferred-update",
deferedValue: "update",
}
exutil.By("Create deferred-update profile")
defferedNTORes.applyNTOTunedProfileWithDeferredAnnotation(oc)
exutil.By("Create deferred-update profile and apply it to nodes")
defferedNTORes.assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "deferred-update-profile", "True")
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
exutil.By("Compare if the value kernel.shmmni in on labeled node, should be 8192")
compareSpecifiedValueByNameOnLabelNodewithRetry(oc, ntoNamespace, tunedNodeName, "kernel.shmmni", "8192")
exutil.By("Path tuned with new value of kernel.shmmni to 10240")
patchTunedProfile(oc, ntoNamespace, "deferred-update-profile", ntoDeferedUpdatePatch)
exutil.By("Path the tuned profile with a new value, the new value take effective after node reboot")
defferedNTORes.assertIfTunedProfileApplied(oc, ntoNamespace, tunedNodeName, "deferred-update-profile", "False")
output, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profile.tuned.openshift.io", tunedNodeName, `-ojsonpath='{.status.conditions[0].message}'`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).NotTo(o.BeEmpty())
o.Expect(output).To(o.ContainSubstring("The TuneD daemon profile is waiting for the next node restart"))
exutil.By("Reboot the node with updated tuned profile")
err = oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", ntoNamespace, "-it", tunedPodName, "--", "reboot").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AssertIfMCPChangesAppliedByName(oc, "worker", 600)
exutil.By("Compare if the value kernel.shmmni in on labeled node, should be 10240")
compareSpecifiedValueByNameOnLabelNodewithRetry(oc, ntoNamespace, tunedNodeName, "kernel.shmmni", "10240")
exutil.By("Removed deffered tuned custom profile and unlabel node")
defferedNTORes.delete(oc)
exutil.By("Compare if the value kernel.shmmni in on labeled node, it will rollback to 4096")
compareSpecifiedValueByNameOnLabelNodewithRetry(oc, ntoNamespace, tunedNodeName, "kernel.shmmni", "4096")
})
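// Editor's illustrative sketch (not part of the test above): after patching a deferred profile,
// the Profile condition message reports that the change is waiting for a reboot. A minimal poll
// for that message, reusing only commands already used in this case and assuming the "strings"
// and "time" packages; the helper name and retry counts are hypothetical.
func waitForDeferredRestartMessage(oc *exutil.CLI, namespace, nodeName string) bool {
for i := 0; i < 12; i++ {
msg, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", namespace, "profile.tuned.openshift.io", nodeName, `-ojsonpath='{.status.conditions[0].message}'`).Output()
if err == nil && strings.Contains(msg, "waiting for the next node restart") {
return true
}
time.Sleep(10 * time.Second)
}
return false
}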
| ||||||
test case
|
openshift/openshift-tests-private
|
72fca672-8330-43d7-8b9a-fd3f405fd957
|
Author:liqcui-Longduration-NonPreRelease-Medium-77764-NTO - Failure to pull NTO image preventing startup of ocp-tuned-one-shot.service[Disruptive]
|
['"strconv"']
|
github.com/openshift/openshift-tests-private/test/extended/psap/nto/nto.go
|
g.It("Author:liqcui-Longduration-NonPreRelease-Medium-77764-NTO - Failure to pull NTO image preventing startup of ocp-tuned-one-shot.service[Disruptive]", func() {
isSNO := exutil.IsSNOCluster(oc)
if !isNTO || isSNO {
g.Skip("NTO is not installed or is Single Node Cluster- skipping test ...")
}
var (
ntoDisableHttpsMCPFile = exutil.FixturePath("testdata", "psap", "nto", "disable-https-mcp.yaml")
ntoDisableHttpsPPFile = exutil.FixturePath("testdata", "psap", "nto", "disable-https-pp.yaml")
)
proxyStdOut, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("proxy", "cluster", "-ojsonpath={.spec.httpsProxy}").Output()
e2e.Logf("proxyStdOut is %v", proxyStdOut)
o.Expect(err).NotTo(o.HaveOccurred())
if len(proxyStdOut) == 0 {
g.Skip("No proxy in the cluster - skipping test ...")
}
machinesetCount := getTotalLinuxMachinesetNum(oc)
e2e.Logf("total linux machineset count is %v", machinesetCount)
if machinesetCount > 1 {
tunedNodeName = choseOneWorkerNodeToRunCase(oc, 0)
} else {
tunedNodeName = choseOneWorkerNodeNotByMachineset(oc, 0)
}
//Get how many cpus on the specified worker node
exutil.By("Get how many cpus cores on the labeled worker node")
nodeCPUCores, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", tunedNodeName, "-ojsonpath={.status.capacity.cpu}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeCPUCores).NotTo(o.BeEmpty())
nodeCPUCoresInt, err := strconv.Atoi(nodeCPUCores)
o.Expect(err).NotTo(o.HaveOccurred())
if nodeCPUCoresInt <= 1 {
g.Skip("the worker node don't have enough cpus - skipping test ...")
}
labeledNode := exutil.GetNodeListByLabel(oc, "node-role.kubernetes.io/worker-nohttps")
defer func() {
oc.AsAdmin().WithoutNamespace().Run("delete").Args("mcp", "worker-nohttps", "--ignore-not-found").Execute()
oc.AsAdmin().WithoutNamespace().Run("delete").Args("PerformanceProfile", "performance", "-n", ntoNamespace, "--ignore-not-found").Execute()
}()
if len(labeledNode) == 0 {
defer func() {
oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-nohttps-").Execute()
//make sure labeled node return to worker mcp
exutil.AssertIfMCPChangesAppliedByName(oc, "worker", 720)
}()
}
//Get the tuned pod name on the labeled node
tunedPodName := getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
o.Expect(tunedPodName).NotTo(o.BeEmpty())
exutil.By("Pickup one worker nodes to label node to worker-nohttps ...")
if len(labeledNode) == 0 {
oc.AsAdmin().WithoutNamespace().Run("label").Args("node", tunedNodeName, "node-role.kubernetes.io/worker-nohttps=").Execute()
}
exutil.ApplyOperatorResourceByYaml(oc, ntoNamespace, ntoDisableHttpsMCPFile)
exutil.By("Remove NTO image on label node")
stdOut, _ := exutil.DebugNodeRetryWithOptionsAndChroot(oc, tunedNodeName, []string{"-q"}, "/bin/bash", "-c", ". /var/lib/ocp-tuned/image.env;podman rmi $NTO_IMAGE --force")
e2e.Logf("removed NTO image is %v", stdOut)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Apply pao performance profile")
exutil.ApplyOperatorResourceByYaml(oc, ntoNamespace, ntoDisableHttpsPPFile)
exutil.AssertIfMCPChangesAppliedByName(oc, "worker-nohttps", 720)
exutil.By("Check current profile for each node")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", ntoNamespace, "profiles.tuned.openshift.io").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Current profile for each node: \n%v", output)
//Inactive status means systemctl status ocp-tuned-one-shot.service exits with an error, which is expected
exutil.By("Check systemctl status ocp-tuned-one-shot.service, Active: inactive is expected")
stdOut, _ = oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", ntoNamespace, "--quiet=true", "node/"+tunedNodeName, "--", "chroot", "/host", "systemctl", "status", "ocp-tuned-one-shot.service").Output()
o.Expect(stdOut).To(o.ContainSubstring("ocp-tuned-one-shot.service: Deactivated successfully"))
exutil.By("Check systemctl status kubelet, Active: active (running) is expected")
stdOut, err = exutil.DebugNodeRetryWithOptionsAndChroot(oc, tunedNodeName, []string{"-q"}, "systemctl", "status", "kubelet")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(stdOut).To(o.ContainSubstring("Active: active (running)"))
exutil.By("Remove NTO image on label node and delete tuned pod, the image can pull successfully")
stdOut, err = exutil.DebugNodeRetryWithOptionsAndChroot(oc, tunedNodeName, []string{"-q"}, "/bin/bash", "-c", ". /var/lib/ocp-tuned/image.env;podman rmi $NTO_IMAGE --force")
e2e.Logf("removed NTO image is %v", stdOut)
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", ntoNamespace, "pod", tunedPodName).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
//Get the tuned pod name on the labeled node again
tunedPodName = getTunedPodNamebyNodeName(oc, tunedNodeName, ntoNamespace)
exutil.AssertPodToBeReady(oc, tunedPodName, ntoNamespace)
podDescOutput, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args("-n", ntoNamespace, "pod", tunedPodName).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(podDescOutput).To(o.ContainSubstring("Successfully pulled image"))
})
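// Editor's illustrative sketch (not part of the test above): the final describe-based assertion
// can flake if it is read before the pull event is recorded, so a retry wrapper is one option.
// Only commands already used in this case are reused; assumes the "strings" and "time" packages;
// the helper name and retry counts are hypothetical.
func podEventuallyReportsImagePulled(oc *exutil.CLI, namespace, podName string) bool {
for i := 0; i < 10; i++ {
desc, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args("-n", namespace, "pod", podName).Output()
if err == nil && strings.Contains(desc, "Successfully pulled image") {
return true
}
time.Sleep(15 * time.Second)
}
return false
}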
|