element_type | project_name | uuid | name | imports | structs | interfaces | file_location | code | global_vars | package | tags
---|---|---|---|---|---|---|---|---|---|---|---
function
|
openshift/openshift-tests-private
|
7817de0c-1c96-4edc-add5-34fc27f7cf2a
|
GetAPIVIPOnCluster
|
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
|
func GetAPIVIPOnCluster(oc *exutil.CLI) string {
apiVIP := ""
var err error
o.Eventually(func() error {
apiVIP, err = oc.WithoutNamespace().AsAdmin().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.baremetal.apiServerInternalIP}").Output()
return err
}, "60s", "5s").ShouldNot(o.HaveOccurred())
return apiVIP
}
|
networking
| |||||
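A minimal usage sketch (hypothetical, not part of the dataset): assuming a caller in the same networking test package, where oc, g, o, and e2e are already in scope from the package imports, the helper is typically consumed like this:
// Hypothetical caller; the spec title is illustrative only.
g.It("example: resolve the baremetal API VIP", func() {
	apiVIP := GetAPIVIPOnCluster(oc)
	o.Expect(apiVIP).NotTo(o.BeEmpty(), "expected a non-empty apiServerInternalIP")
	e2e.Logf("API VIP on this cluster: %s", apiVIP)
})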
function
|
openshift/openshift-tests-private
|
61451886-cc55-4643-93fd-22f22dc666d5
|
createHttpservePodNodeByAdmin
|
['"fmt"', '"strconv"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['httpserverPodResourceNode']
|
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
|
func (pod *httpserverPodResourceNode) createHttpservePodNodeByAdmin(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", pod.template, "-p", "NAME="+pod.name, "NAMESPACE="+pod.namespace, "CONTAINERPORT="+strconv.Itoa(int(pod.containerport)), "HOSTPORT="+strconv.Itoa(int(pod.hostport)), "NODENAME="+pod.nodename)
if err1 != nil {
e2e.Logf("the err:%v, and try next round", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create pod %v", pod.name))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
e7b9e357-da99-4ce5-a77b-91e4f96bd8d6
|
CurlPod2NodePass
|
['"net"', 'netutils "k8s.io/utils/net"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
|
func CurlPod2NodePass(oc *exutil.CLI, namespaceSrc, podNameSrc, nodeNameDst, DstHostPort string) {
nodeIP2, nodeIP1 := getNodeIP(oc, nodeNameDst)
if nodeIP2 != "" {
_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl -I --connect-timeout 5 -s "+net.JoinHostPort(nodeIP1, DstHostPort))
o.Expect(err).NotTo(o.HaveOccurred())
_, err = e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl -I --connect-timeout 5 -s "+net.JoinHostPort(nodeIP2, DstHostPort))
o.Expect(err).NotTo(o.HaveOccurred())
} else {
_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl -I --connect-timeout 5 -s "+net.JoinHostPort(nodeIP1, DstHostPort))
o.Expect(err).NotTo(o.HaveOccurred())
}
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
7cde41a7-968f-4cbf-bd86-deb8417dd451
|
CurlPod2NodeFail
|
['"net"', 'netutils "k8s.io/utils/net"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
|
func CurlPod2NodeFail(oc *exutil.CLI, namespaceSrc, podNameSrc, nodeNameDst, DstHostPort string) {
nodeIP2, nodeIP1 := getNodeIP(oc, nodeNameDst)
if nodeIP2 != "" {
_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl -I --connect-timeout 5 -s "+net.JoinHostPort(nodeIP1, DstHostPort))
o.Expect(err).To(o.HaveOccurred())
_, err = e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl -I --connect-timeout 5 -s "+net.JoinHostPort(nodeIP2, DstHostPort))
o.Expect(err).To(o.HaveOccurred())
} else {
_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl -I --connect-timeout 5 -s "+net.JoinHostPort(nodeIP1, DstHostPort))
o.Expect(err).To(o.HaveOccurred())
}
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
be59776d-90ac-4294-862e-5cc80b042856
|
CurlPod2HostPass
|
['"net"', 'netutils "k8s.io/utils/net"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
|
func CurlPod2HostPass(oc *exutil.CLI, namespaceSrc, podNameSrc, hostip, DstHostPort string) {
_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl -I --connect-timeout 5 -s "+net.JoinHostPort(hostip, DstHostPort))
o.Expect(err).NotTo(o.HaveOccurred())
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
5ea1075c-97b2-4d8d-99b0-03047d070a4e
|
CurlPod2HostFail
|
['"net"', 'netutils "k8s.io/utils/net"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
|
func CurlPod2HostFail(oc *exutil.CLI, namespaceSrc, podNameSrc, hostip, DstHostPort string) {
_, err := e2eoutput.RunHostCmd(namespaceSrc, podNameSrc, "curl -I --connect-timeout 5 -s "+net.JoinHostPort(hostip, DstHostPort))
o.Expect(err).To(o.HaveOccurred())
}
|
networking
| ||||
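A short sketch of how the CurlPod2Node and CurlPod2Host pass/fail pairs are commonly combined (hypothetical: the namespace, pod name, node name, and hostPort below are placeholders, and the blocking change is whatever the individual test exercises):
// Hypothetical flow, same package as utils.go.
CurlPod2NodePass(oc, ns, "hello-pod-1", nodeName, "8080")
// ... apply the network policy or firewall rule under test ...
CurlPod2NodeFail(oc, ns, "hello-pod-1", nodeName, "8080")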
function
|
openshift/openshift-tests-private
|
2f5aae3b-d36c-4a26-a46a-fbe75d0e998b
|
checkFips
|
['"io"', '"os"', '"strings"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
|
func checkFips(oc *exutil.CLI) bool {
node, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "--selector=node-role.kubernetes.io/worker,kubernetes.io/os=linux", "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
fipsInfo, err := exutil.DebugNodeWithChroot(oc, node, "bash", "-c", "fips-mode-setup --check")
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(fipsInfo, "FIPS mode is disabled.") {
e2e.Logf("FIPS is not enabled.")
return false
}
e2e.Logf("FIPS is enabled.")
return true
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
7ee33b2f-c5dc-4bb4-89cd-6eb2245ec913
|
checkIPv6PublicAccess
|
['"strings"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
|
func checkIPv6PublicAccess(oc *exutil.CLI) bool {
workNode, err := exutil.GetFirstWorkerNode(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
curlCMD := "curl -6 www.google.com --connect-timeout 5 -I"
output, err := exutil.DebugNode(oc, workNode, "bash", "-c", curlCMD)
if !strings.Contains(output, "HTTP") || err != nil {
e2e.Logf(output)
e2e.Logf("Unable to access the public Internet with IPv6 from the cluster.")
return false
}
e2e.Logf("Successfully connected to the public Internet with IPv6 from the cluster.")
return true
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
2ae8058b-496d-4ba7-bdcc-265f95673a31
|
forceRebootNode
|
['e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
|
func forceRebootNode(oc *exutil.CLI, nodeName string) {
e2e.Logf("\nRebooting node %s....", nodeName)
runCmd, _, _, runCmdErr := oc.AsAdmin().Run("debug").Args("node/"+nodeName, "--", "chroot", "/host", "reboot", "--force").Background()
defer runCmd.Process.Kill()
o.Expect(runCmdErr).NotTo(o.HaveOccurred())
waitForNetworkOperatorState(oc, 100, 15, "True.*False.*False")
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
43dc70cc-36e1-4f6f-b359-dc1b2ef94f8e
|
createResourceFromFileWithError
|
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
|
func createResourceFromFileWithError(oc *exutil.CLI, ns, file string) error {
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", file, "-n", ns).Execute()
return err
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
62bf5cde-2126-4af5-b127-dee47e9f62dc
|
createCustomResponsePod
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['customResponsePodResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
|
func (pod *customResponsePodResource) createCustomResponsePod(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", pod.template, "-p", "NAME="+pod.name, "NAMESPACE="+pod.namespace,
"LABELKEY="+pod.labelKey, "LABELVAL="+pod.labelVal,
"RESPONSESTR="+pod.responseStr)
if err1 != nil {
e2e.Logf("the err:%v, and try again...", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Failed to create pod %s due to %v", pod.name, err))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
ec7167e3-2484-4dd4-98ae-957b4616c2f1
|
createSessionAffiniltyService
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['sessionAffinityServiceResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
|
func (svc *sessionAffinityServiceResource) createSessionAffiniltyService(oc *exutil.CLI) {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", svc.template, "-p", "NAME="+svc.name, "NAMESPACE="+svc.namespace,
"IPFAMILYPOLICY="+svc.ipFamilyPolicy, "SELLABELKEY="+svc.selLabelKey, "SELLABELVAL="+svc.SelLabelVal)
if err1 != nil {
e2e.Logf("the err:%v, and try again...", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Failed to create pservice %s due to %v", svc.name, err))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
1d76356e-abb6-4917-b10b-9cc6ea26fbf4
|
getEnabledFeatureGates
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
|
func getEnabledFeatureGates(oc *exutil.CLI) ([]string, error) {
enabledFeatureGates, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("featuregate", "cluster", "-o=jsonpath={.status.featureGates[0].enabled[*].name}").Output()
if err != nil {
return nil, err
}
return strings.Split(enabledFeatureGates, " "), nil
}
|
networking
| ||||
function
|
openshift/openshift-tests-private
|
5c7328a6-bb45-4476-b704-5f80c1abeb45
|
IsFeaturegateEnabled
|
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
|
func IsFeaturegateEnabled(oc *exutil.CLI, featuregate string) (bool, error) {
enabledFeatureGates, err := getEnabledFeatureGates(oc)
if err != nil {
return false, err
}
for _, f := range enabledFeatureGates {
if f == featuregate {
return true, nil
}
}
return false, nil
}
|
networking
| |||||
function
|
openshift/openshift-tests-private
|
578565db-d930-44fd-8432-055012cc5a64
|
SkipIfNoFeatureGate
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
|
func SkipIfNoFeatureGate(oc *exutil.CLI, featuregate string) {
enabled, err := IsFeaturegateEnabled(oc, featuregate)
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting enabled featuregates")
if !enabled {
g.Skip(fmt.Sprintf("Featuregate %s is not enabled in this cluster", featuregate))
}
}
|
networking
| ||||
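A sketch of the intended call pattern (hypothetical; the feature gate name is only an example and is not asserted by the source):
g.It("example: spec gated on a feature gate", func() {
	// Skips the spec unless the named gate appears in getEnabledFeatureGates.
	SkipIfNoFeatureGate(oc, "NetworkSegmentation")
	// ... test body that relies on the gated feature ...
})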
function
|
openshift/openshift-tests-private
|
3616b446-3d89-4f54-9992-4d9f20608cda
|
createVRF
|
['"fmt"', '"strconv"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
['VRFResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
|
func (vrf *VRFResource) createVRF(oc *exutil.CLI) error {
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", vrf.template, "-p", "NAME="+vrf.name, "INTFNAME="+vrf.intfname, "NODENAME="+vrf.nodename, "TABLEID="+strconv.Itoa(int(vrf.tableid)))
if err1 != nil {
e2e.Logf("Creating VRF on the node failed :%v, and try next round", err1)
return false, nil
}
return true, nil
})
if err != nil {
return fmt.Errorf("fail to create VRF on the node %v", vrf.name)
}
return nil
}
|
networking
| |||
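A usage sketch for createVRF (hypothetical: vrfTemplate and nodeName are placeholder variables; the field names mirror the template parameters used above and may not match the real VRFResource definition exactly):
vrf := VRFResource{
	name:     "vrf-example",
	intfname: "ens5",
	nodename: nodeName,
	tableid:  10,
	template: vrfTemplate,
}
// createVRF returns an error instead of asserting, so the caller decides how to fail.
o.Expect(vrf.createVRF(oc)).NotTo(o.HaveOccurred())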
function
|
openshift/openshift-tests-private
|
ffeddbcd-5d59-44e2-920e-abcabd6ca116
|
createNamedPortPod
|
['"fmt"', '"strconv"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['namedPortPodResource']
|
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
|
func (namedPortPod *namedPortPodResource) createNamedPortPod(oc *exutil.CLI) {
exutil.By("Creating named port pod from template")
err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err1 := applyResourceFromTemplateByAdmin(oc, "--ignore-unknown-parameters=true", "-f", namedPortPod.template, "-p", "NAME="+namedPortPod.name,
"NAMESPACE="+namedPortPod.namespace, "PODLABELKEY="+namedPortPod.podLabelKey, "PODLABELVAL="+namedPortPod.podLabelVal,
"PORTNAME="+namedPortPod.portname, "CONTAINERPORT="+strconv.Itoa(int(namedPortPod.containerport)))
if err1 != nil {
e2e.Logf("Error creating resource:%v, and trying again", err1)
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Failed to create named port pod %v", namedPortPod.name))
}
|
networking
| |||
function
|
openshift/openshift-tests-private
|
0efb6fbb-ca6a-4884-b01e-63cc821f6451
|
getTcpdumpOnNodeCmdFromPod
|
['"context"', '"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
|
func getTcpdumpOnNodeCmdFromPod(oc *exutil.CLI, nodeName, tcpdumpCmd, namespace, podname, cmdOnPod string) string {
exutil.By("Enable tcpdump on node")
cmdTcpdump, cmdOutput, _, err := oc.AsAdmin().Run("debug").Args("-n", "default", "node/"+nodeName, "--", "bash", "-c", tcpdumpCmd).Background()
defer cmdTcpdump.Process.Kill()
o.Expect(err).NotTo(o.HaveOccurred())
//Wait 5 seconds to let the tcpdump ready for capturing traffic
time.Sleep(5 * time.Second)
exutil.By("Curl external host:port from test pods")
var tcpdumpErr error = nil
checkErr := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 60*time.Second, false, func(cxt context.Context) (bool, error) {
_, curlErr := e2eoutput.RunHostCmd(namespace, podname, cmdOnPod)
if curlErr == nil {
tcpdumpErr = cmdTcpdump.Wait()
e2e.Logf("The captured tcpdump outout is: \n%s\n", cmdOutput.String())
}
if curlErr != nil || tcpdumpErr != nil {
e2e.Logf("Getting error at executing curl command: %v or at waiting for tcpdump: %v, try again ...", curlErr, tcpdumpErr)
return false, nil
}
if cmdOutput.String() == "" {
e2e.Logf("Did not capture tcpdump packets,try again ...")
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(checkErr, fmt.Sprintf("Unable to get tcpdump when curling from pod:%s from namespace: %s", podname, namespace))
cmdTcpdump.Process.Kill()
return cmdOutput.String()
}
|
networking
| ||||
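A sketch of a typical invocation (hypothetical: the interface name, external endpoint, and pod name are placeholders). The helper backgrounds tcpdump on the node via oc debug, runs the given command from the pod, and returns whatever tcpdump printed.
tcpdumpCmd := "timeout 60s tcpdump -c 4 -nni br-ex host www.example.com"
output := getTcpdumpOnNodeCmdFromPod(oc, nodeName, tcpdumpCmd, ns, "hello-pod-1", "curl -s --connect-timeout 5 www.example.com")
o.Expect(output).NotTo(o.BeEmpty())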
function
|
openshift/openshift-tests-private
|
bd9c8b9a-f203-4c7f-93ff-72374965388f
|
collectMustGather
|
['e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
|
func collectMustGather(oc *exutil.CLI, dstDir string, imageStream string, parameters []string) (string, error) {
args := []string{"must-gather"}
if dstDir != "" {
args = append(args, "--dest-dir="+dstDir)
}
if imageStream != "" {
args = append(args, "--image-stream="+imageStream)
}
if len(parameters) > 0 {
args = append(args, "--")
for _, param := range parameters {
args = append(args, param)
}
}
output, err := oc.AsAdmin().WithoutNamespace().Run("adm").Args(args...).Output()
if err != nil {
e2e.Logf("collect must-gather failed, err: %v", err)
return "", err
}
return output, nil
}
|
networking
| ||||
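A sketch of how collectMustGather maps onto oc adm must-gather (hypothetical: the destination directory, image stream, and gather script are placeholders; the parameters slice becomes the command suffix after the double dash):
output, gatherErr := collectMustGather(oc, "/tmp/must-gather-network", "openshift/network-tools", []string{"gather_network_logs"})
o.Expect(gatherErr).NotTo(o.HaveOccurred())
e2e.Logf("must-gather finished:\n%s", output)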
function
|
openshift/openshift-tests-private
|
c79a35c7-fbf1-46d7-8fb6-d3bf73b555fe
|
verifyPodConnCrossNodes
|
['"net"', '"path/filepath"', 'netutils "k8s.io/utils/net"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
|
func verifyPodConnCrossNodes(oc *exutil.CLI) bool {
buildPruningBaseDir := exutil.FixturePath("testdata", "networking")
helloDaemonset := filepath.Join(buildPruningBaseDir, "hello-pod-daemonset.yaml")
pass := true
exutil.By("Create a temporay project for pods to pods connection checking.")
oc.SetupProject()
ns := oc.Namespace()
exutil.By("Create hello-pod-daemonset in namespace.")
createResourceFromFile(oc, ns, helloDaemonset)
err := waitForPodWithLabelReady(oc, ns, "name=hello-pod")
exutil.AssertWaitPollNoErr(err, "ipsec pods are not ready after killing pluto")
exutil.By("Checking pods connection")
pods := getPodName(oc, ns, "name=hello-pod")
for _, srcPod := range pods {
for _, targetPod := range pods {
if targetPod != srcPod {
podIP1, podIP2 := getPodIP(oc, ns, targetPod)
e2e.Logf("Curling from pod: %s with IP: %s\n", srcPod, podIP1)
_, err := e2eoutput.RunHostCmd(ns, srcPod, "curl --connect-timeout 10 -s "+net.JoinHostPort(podIP1, "8080"))
if err != nil {
e2e.Logf("pods connection failed from %s to %s:8080", srcPod, podIP1)
srcNode, err := exutil.GetPodNodeName(oc, ns, srcPod)
o.Expect(err).NotTo(o.HaveOccurred())
dstnode, err := exutil.GetPodNodeName(oc, ns, targetPod)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("pods connection failed between nodes %s and %s", srcNode, dstnode)
pass = false
}
if podIP2 != "" {
e2e.Logf("Curling from pod: %s with IP: %s\n", srcPod, podIP2)
_, err := e2eoutput.RunHostCmd(ns, srcPod, "curl --connect-timeout 10 -s "+net.JoinHostPort(podIP2, "8080"))
if err != nil {
e2e.Logf("pods connection failed from %s to %s:8080", srcPod, podIP2)
srcNode, err := exutil.GetPodNodeName(oc, ns, srcPod)
o.Expect(err).NotTo(o.HaveOccurred())
dstnode, err := exutil.GetPodNodeName(oc, ns, targetPod)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("pods connection failed between nodes %s and %s", srcNode, dstnode)
pass = false
}
}
}
}
}
e2e.Logf("The pods connection pass check is %v ", pass)
return pass
}
|
networking
| ||||
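Since verifyPodConnCrossNodes already logs every failing pod pair and only returns an aggregate boolean, callers typically just assert on the result (a hedged sketch, assuming a Gomega context):
o.Expect(verifyPodConnCrossNodes(oc)).To(o.BeTrue(), "cross-node pod-to-pod connectivity check failed")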
function
|
openshift/openshift-tests-private
|
72d362e3-2d98-4d09-8a33-090e8abecd57
|
waitForPodsCount
|
['"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/networking/utils.go
|
func waitForPodsCount(oc *exutil.CLI, namespace, labelSelector string, expectedCount int, interval, timeout time.Duration) error {
return wait.Poll(interval, timeout, func() (bool, error) {
allPods, getPodErr := exutil.GetAllPodsWithLabel(oc, namespace, labelSelector)
if getPodErr != nil {
e2e.Logf("Error fetching pods: %v, retrying...", getPodErr)
return false, nil
}
if len(allPods) == expectedCount {
return true, nil // Condition met, exit polling
}
e2e.Logf("Expected %d pods, but found %d. Retrying...", expectedCount, len(allPods))
return false, nil
})
}
|
networking
| ||||
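A usage sketch for waitForPodsCount (hypothetical: the label selector and expected count are placeholders; time comes from the imports listed for this record):
err := waitForPodsCount(oc, ns, "name=hello-pod", 3, 10*time.Second, 2*time.Minute)
exutil.AssertWaitPollNoErr(err, "expected pod count was not reached in time")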
test
|
openshift/openshift-tests-private
|
0feb0ed8-40d9-4cf4-b44a-6a82bb39c794
|
kubeletconfig
|
import (
"path/filepath"
g "github.com/onsi/ginkgo/v2"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)
|
github.com/openshift/openshift-tests-private/test/extended/node/kubeletconfig.go
|
package node
import (
"path/filepath"
g "github.com/onsi/ginkgo/v2"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)
var _ = g.Describe("[sig-node] NODE kubeletconfig feature", func() {
defer g.GinkgoRecover()
var oc = exutil.NewCLI("node-"+getRandomString(), exutil.KubeConfigPath())
// author: [email protected]
g.It("NonHyperShiftHOST-Author:minmli-Medium-39142-kubeletconfig should not prompt duplicate error message", func() {
buildPruningBaseDir := exutil.FixturePath("testdata", "node")
kubeletConfigT := filepath.Join(buildPruningBaseDir, "kubeletconfig-maxpod.yaml")
g.By("Test for case OCP-39142")
labelKey := "custom-kubelet-" + getRandomString()
labelValue := "maxpods-" + getRandomString()
kubeletcfg39142 := kubeletCfgMaxpods{
name: "custom-kubelet-39142",
labelkey: labelKey,
labelvalue: labelValue,
maxpods: 239,
template: kubeletConfigT,
}
g.By("Create a kubeletconfig without matching machineConfigPool label")
kubeletcfg39142.createKubeletConfigMaxpods(oc)
defer kubeletcfg39142.deleteKubeletConfigMaxpods(oc)
g.By("Check kubeletconfig should not prompt duplicate error message")
keyword := "Error: could not find any MachineConfigPool set for KubeletConfig"
err := kubeletNotPromptDupErr(oc, keyword, kubeletcfg39142.name)
exutil.AssertWaitPollNoErr(err, "kubeletconfig prompt duplicate error message")
})
})
|
package node
| ||||
test case
|
openshift/openshift-tests-private
|
84f82d58-f830-47b8-8d1b-f770b5efdb3f
|
NonHyperShiftHOST-Author:minmli-Medium-39142-kubeletconfig should not prompt duplicate error message
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/node/kubeletconfig.go
|
g.It("NonHyperShiftHOST-Author:minmli-Medium-39142-kubeletconfig should not prompt duplicate error message", func() {
buildPruningBaseDir := exutil.FixturePath("testdata", "node")
kubeletConfigT := filepath.Join(buildPruningBaseDir, "kubeletconfig-maxpod.yaml")
g.By("Test for case OCP-39142")
labelKey := "custom-kubelet-" + getRandomString()
labelValue := "maxpods-" + getRandomString()
kubeletcfg39142 := kubeletCfgMaxpods{
name: "custom-kubelet-39142",
labelkey: labelKey,
labelvalue: labelValue,
maxpods: 239,
template: kubeletConfigT,
}
g.By("Create a kubeletconfig without matching machineConfigPool label")
kubeletcfg39142.createKubeletConfigMaxpods(oc)
defer kubeletcfg39142.deleteKubeletConfigMaxpods(oc)
g.By("Check kubeletconfig should not prompt duplicate error message")
keyword := "Error: could not find any MachineConfigPool set for KubeletConfig"
err := kubeletNotPromptDupErr(oc, keyword, kubeletcfg39142.name)
exutil.AssertWaitPollNoErr(err, "kubeletconfig prompt duplicate error message")
})
| |||||
test
|
openshift/openshift-tests-private
|
371328e1-629a-450d-8f47-9c7c681a7366
|
probe
|
import (
"path/filepath"
"strconv"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/node/probe.go
|
package node
import (
"path/filepath"
"strconv"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-node] NODE Probe feature", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("node-"+getRandomString(), exutil.KubeConfigPath())
buildPruningBaseDir string
livenessProbeTemp string
startupProbeTemp string
readinessProbeTemp string
livenessProbeNoTerminateTemp string
)
g.BeforeEach(func() {
buildPruningBaseDir = exutil.FixturePath("testdata", "node")
livenessProbeTemp = filepath.Join(buildPruningBaseDir, "livenessProbe-terminationPeriod.yaml")
startupProbeTemp = filepath.Join(buildPruningBaseDir, "startupProbe-terminationPeriod.yaml")
readinessProbeTemp = filepath.Join(buildPruningBaseDir, "readinessProbe-terminationPeriod.yaml")
livenessProbeNoTerminateTemp = filepath.Join(buildPruningBaseDir, "livenessProbe-without-terminationPeriod.yaml")
})
// author: [email protected]
g.It("Author:minmli-High-41579-Liveness probe failures should terminate the pod immediately", func() {
buildPruningBaseDir := exutil.FixturePath("testdata", "node")
podProbeT := filepath.Join(buildPruningBaseDir, "pod-liveness-probe.yaml")
g.By("Test for case OCP-41579")
g.By("create new namespace")
oc.SetupProject()
pod41579 := podLivenessProbe{
name: "probe-pod-41579",
namespace: oc.Namespace(),
overridelivenessgrace: "10",
terminationgrace: 300,
failurethreshold: 1,
periodseconds: 60,
template: podProbeT,
}
g.By("Create a pod with liveness probe")
pod41579.createPodLivenessProbe(oc)
defer pod41579.deletePodLivenessProbe(oc)
g.By("check pod status")
err := podStatus(oc, pod41579.namespace, pod41579.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
g.By("check pod events") // create function
timeout := 90
keyword := "Container test failed liveness probe, will be restarted"
err = podEvent(oc, timeout, keyword)
exutil.AssertWaitPollNoErr(err, "event check failed: "+keyword)
g.By("check pod restart in override termination grace period")
err = podStatus(oc, pod41579.namespace, pod41579.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
})
// author: [email protected]
g.It("Author:minmli-High-44493-add configurable terminationGracePeriod to liveness and startup probes", func() {
var (
testNs = oc.Namespace()
liveProbeTermP44493 = liveProbeTermPeriod{
name: "liveness-probe",
namespace: testNs,
terminationgrace: 60,
probeterminationgrace: 10,
template: livenessProbeTemp,
}
startProbeTermP44493 = startProbeTermPeriod{
name: "startup-probe",
namespace: testNs,
terminationgrace: 60,
probeterminationgrace: 10,
template: startupProbeTemp,
}
readProbeTermP44493 = readProbeTermPeriod{
name: "readiness-probe",
namespace: testNs,
terminationgrace: 60,
probeterminationgrace: 10,
template: readinessProbeTemp,
}
liveProbeNoTermP44493 = liveProbeNoTermPeriod{
name: "liveness-probe-no",
namespace: testNs,
terminationgrace: 60,
template: livenessProbeNoTerminateTemp,
}
)
g.By("Check if exist any featureSet in featuregate cluster")
featureSet, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("featuregate", "cluster", "-o=jsonpath={.spec.featureSet}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("featureSet is: %s", featureSet)
if featureSet != "" {
g.Skip("featureSet is not empty,skip it!")
}
g.By("Create a pod with liveness probe with featuregate ProbeTerminationGracePeriod enabled")
oc.SetupProject()
liveProbeTermP44493.create(oc)
ProbeTerminatePeriod(oc, liveProbeTermP44493.terminationgrace, liveProbeTermP44493.probeterminationgrace, liveProbeTermP44493.name, liveProbeTermP44493.namespace, true)
liveProbeTermP44493.delete(oc)
g.By("Create a pod with startup probe with featuregate ProbeTerminationGracePeriod enabled")
startProbeTermP44493.create(oc)
ProbeTerminatePeriod(oc, startProbeTermP44493.terminationgrace, startProbeTermP44493.probeterminationgrace, startProbeTermP44493.name, startProbeTermP44493.namespace, true)
startProbeTermP44493.delete(oc)
g.By("Create a pod with liveness probe but unset terminationGracePeriodSeconds in probe spec")
liveProbeNoTermP44493.create(oc)
ProbeTerminatePeriod(oc, liveProbeNoTermP44493.terminationgrace, 0, liveProbeNoTermP44493.name, liveProbeNoTermP44493.namespace, false)
liveProbeNoTermP44493.delete(oc)
/*
// A bug is pending for probe-level terminationGracePeriod, so comment the code temporarily
//revert featuregate afterwards
defer func() {
err = checkMachineConfigPoolStatus(oc, "master")
exutil.AssertWaitPollNoErr(err, "macineconfigpool master update failed")
err = checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
}()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("featuregate/cluster", "-p", `{"spec":{"featureSet": "CustomNoUpgrade","customNoUpgrade": {"enabled": ["ProbeTerminationGracePeriod"]}}}`, "--type=merge").Execute()
g.By("Disable ProbeTerminationGracePeriod in featuregate")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("featuregate/cluster", "-p", `{"spec":{"featureSet": "CustomNoUpgrade","customNoUpgrade": {"disabled": ["ProbeTerminationGracePeriod"]}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = checkMachineConfigPoolStatus(oc, "master")
exutil.AssertWaitPollNoErr(err, "macineconfigpool master update failed")
err = checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
g.By("Check featuregate take effect")
featureConfig := []string{"\"ProbeTerminationGracePeriod\": false"}
err = crioConfigExist(oc, featureConfig, "/etc/kubernetes/kubelet.conf")
exutil.AssertWaitPollNoErr(err, "featureGate is not set as expected")
g.By("Create a pod with liveness probe with featuregate ProbeTerminationGracePeriod disabled")
liveProbeTermP44493.name = "liveness-probe"
liveProbeTermP44493.namespace = oc.Namespace()
liveProbeTermP44493.terminationgrace = 60
liveProbeTermP44493.probeterminationgrace = 10
liveProbeTermP44493.create(oc)
ProbeTerminatePeriod(oc, liveProbeTermP44493.terminationgrace, liveProbeTermP44493.probeterminationgrace, liveProbeTermP44493.name, liveProbeTermP44493.namespace, false)
liveProbeTermP44493.delete(oc)
g.By("Create a pod with startup probe with featuregate ProbeTerminationGracePeriod disabled")
startProbeTermP44493.name = "startup-probe"
startProbeTermP44493.namespace = oc.Namespace()
startProbeTermP44493.terminationgrace = 60
startProbeTermP44493.probeterminationgrace = 10
startProbeTermP44493.create(oc)
ProbeTerminatePeriod(oc, startProbeTermP44493.terminationgrace, startProbeTermP44493.probeterminationgrace, startProbeTermP44493.name, startProbeTermP44493.namespace, false)
startProbeTermP44493.delete(oc)
*/
g.By("Can not create a pod with readiness probe with ProbeTerminationGracePeriodSeconds")
jsonCfg, err := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", readProbeTermP44493.template, "-p", "NAME="+readProbeTermP44493.name, "NAMESPACE="+readProbeTermP44493.namespace, "TERMINATIONGRACE="+strconv.Itoa(readProbeTermP44493.terminationgrace), "PROBETERMINATIONGRACE="+strconv.Itoa(readProbeTermP44493.probeterminationgrace)).OutputToFile("node-config-44493.json")
o.Expect(err).NotTo(o.HaveOccurred())
out, _ := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", jsonCfg).Output()
o.Expect(strings.Contains(out, "spec.containers[0].readinessProbe.terminationGracePeriodSeconds: Invalid value")).To(o.BeTrue())
})
})
|
package node
| ||||
test case
|
openshift/openshift-tests-private
|
f30dfc85-e1d3-4575-9cc2-02c475e70f7f
|
Author:minmli-High-41579-Liveness probe failures should terminate the pod immediately
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/node/probe.go
|
g.It("Author:minmli-High-41579-Liveness probe failures should terminate the pod immediately", func() {
buildPruningBaseDir := exutil.FixturePath("testdata", "node")
podProbeT := filepath.Join(buildPruningBaseDir, "pod-liveness-probe.yaml")
g.By("Test for case OCP-41579")
g.By("create new namespace")
oc.SetupProject()
pod41579 := podLivenessProbe{
name: "probe-pod-41579",
namespace: oc.Namespace(),
overridelivenessgrace: "10",
terminationgrace: 300,
failurethreshold: 1,
periodseconds: 60,
template: podProbeT,
}
g.By("Create a pod with liveness probe")
pod41579.createPodLivenessProbe(oc)
defer pod41579.deletePodLivenessProbe(oc)
g.By("check pod status")
err := podStatus(oc, pod41579.namespace, pod41579.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
g.By("check pod events") // create function
timeout := 90
keyword := "Container test failed liveness probe, will be restarted"
err = podEvent(oc, timeout, keyword)
exutil.AssertWaitPollNoErr(err, "event check failed: "+keyword)
g.By("check pod restart in override termination grace period")
err = podStatus(oc, pod41579.namespace, pod41579.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
})
| |||||
test case
|
openshift/openshift-tests-private
|
1b3de7f5-9340-46eb-9695-5c58054a9fc8
|
Author:minmli-High-44493-add configurable terminationGracePeriod to liveness and startup probes
|
['"strconv"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/node/probe.go
|
g.It("Author:minmli-High-44493-add configurable terminationGracePeriod to liveness and startup probes", func() {
var (
testNs = oc.Namespace()
liveProbeTermP44493 = liveProbeTermPeriod{
name: "liveness-probe",
namespace: testNs,
terminationgrace: 60,
probeterminationgrace: 10,
template: livenessProbeTemp,
}
startProbeTermP44493 = startProbeTermPeriod{
name: "startup-probe",
namespace: testNs,
terminationgrace: 60,
probeterminationgrace: 10,
template: startupProbeTemp,
}
readProbeTermP44493 = readProbeTermPeriod{
name: "readiness-probe",
namespace: testNs,
terminationgrace: 60,
probeterminationgrace: 10,
template: readinessProbeTemp,
}
liveProbeNoTermP44493 = liveProbeNoTermPeriod{
name: "liveness-probe-no",
namespace: testNs,
terminationgrace: 60,
template: livenessProbeNoTerminateTemp,
}
)
g.By("Check if exist any featureSet in featuregate cluster")
featureSet, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("featuregate", "cluster", "-o=jsonpath={.spec.featureSet}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("featureSet is: %s", featureSet)
if featureSet != "" {
g.Skip("featureSet is not empty,skip it!")
}
g.By("Create a pod with liveness probe with featuregate ProbeTerminationGracePeriod enabled")
oc.SetupProject()
liveProbeTermP44493.create(oc)
ProbeTerminatePeriod(oc, liveProbeTermP44493.terminationgrace, liveProbeTermP44493.probeterminationgrace, liveProbeTermP44493.name, liveProbeTermP44493.namespace, true)
liveProbeTermP44493.delete(oc)
g.By("Create a pod with startup probe with featuregate ProbeTerminationGracePeriod enabled")
startProbeTermP44493.create(oc)
ProbeTerminatePeriod(oc, startProbeTermP44493.terminationgrace, startProbeTermP44493.probeterminationgrace, startProbeTermP44493.name, startProbeTermP44493.namespace, true)
startProbeTermP44493.delete(oc)
g.By("Create a pod with liveness probe but unset terminationGracePeriodSeconds in probe spec")
liveProbeNoTermP44493.create(oc)
ProbeTerminatePeriod(oc, liveProbeNoTermP44493.terminationgrace, 0, liveProbeNoTermP44493.name, liveProbeNoTermP44493.namespace, false)
liveProbeNoTermP44493.delete(oc)
/*
// A bug is pending for probe-level terminationGracePeriod, so comment the code temporarily
//revert featuregate afterwards
defer func() {
err = checkMachineConfigPoolStatus(oc, "master")
exutil.AssertWaitPollNoErr(err, "macineconfigpool master update failed")
err = checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
}()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("featuregate/cluster", "-p", `{"spec":{"featureSet": "CustomNoUpgrade","customNoUpgrade": {"enabled": ["ProbeTerminationGracePeriod"]}}}`, "--type=merge").Execute()
g.By("Disable ProbeTerminationGracePeriod in featuregate")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("featuregate/cluster", "-p", `{"spec":{"featureSet": "CustomNoUpgrade","customNoUpgrade": {"disabled": ["ProbeTerminationGracePeriod"]}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = checkMachineConfigPoolStatus(oc, "master")
exutil.AssertWaitPollNoErr(err, "macineconfigpool master update failed")
err = checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
g.By("Check featuregate take effect")
featureConfig := []string{"\"ProbeTerminationGracePeriod\": false"}
err = crioConfigExist(oc, featureConfig, "/etc/kubernetes/kubelet.conf")
exutil.AssertWaitPollNoErr(err, "featureGate is not set as expected")
g.By("Create a pod with liveness probe with featuregate ProbeTerminationGracePeriod disabled")
liveProbeTermP44493.name = "liveness-probe"
liveProbeTermP44493.namespace = oc.Namespace()
liveProbeTermP44493.terminationgrace = 60
liveProbeTermP44493.probeterminationgrace = 10
liveProbeTermP44493.create(oc)
ProbeTerminatePeriod(oc, liveProbeTermP44493.terminationgrace, liveProbeTermP44493.probeterminationgrace, liveProbeTermP44493.name, liveProbeTermP44493.namespace, false)
liveProbeTermP44493.delete(oc)
g.By("Create a pod with startup probe with featuregate ProbeTerminationGracePeriod disabled")
startProbeTermP44493.name = "startup-probe"
startProbeTermP44493.namespace = oc.Namespace()
startProbeTermP44493.terminationgrace = 60
startProbeTermP44493.probeterminationgrace = 10
startProbeTermP44493.create(oc)
ProbeTerminatePeriod(oc, startProbeTermP44493.terminationgrace, startProbeTermP44493.probeterminationgrace, startProbeTermP44493.name, startProbeTermP44493.namespace, false)
startProbeTermP44493.delete(oc)
*/
g.By("Can not create a pod with readiness probe with ProbeTerminationGracePeriodSeconds")
jsonCfg, err := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", readProbeTermP44493.template, "-p", "NAME="+readProbeTermP44493.name, "NAMESPACE="+readProbeTermP44493.namespace, "TERMINATIONGRACE="+strconv.Itoa(readProbeTermP44493.terminationgrace), "PROBETERMINATIONGRACE="+strconv.Itoa(readProbeTermP44493.probeterminationgrace)).OutputToFile("node-config-44493.json")
o.Expect(err).NotTo(o.HaveOccurred())
out, _ := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", jsonCfg).Output()
o.Expect(strings.Contains(out, "spec.containers[0].readinessProbe.terminationGracePeriodSeconds: Invalid value")).To(o.BeTrue())
})
| |||||
test
|
openshift/openshift-tests-private
|
8f366a8d-44dc-40f2-aa30-8d90442170f2
|
node
|
import (
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
"github.com/tidwall/sjson"
e2e "k8s.io/kubernetes/test/e2e/framework"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/openshift/openshift-tests-private/test/extended/util/architecture"
"k8s.io/apimachinery/pkg/util/wait"
//e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
package node
import (
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
"github.com/tidwall/sjson"
e2e "k8s.io/kubernetes/test/e2e/framework"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/openshift/openshift-tests-private/test/extended/util/architecture"
"k8s.io/apimachinery/pkg/util/wait"
//e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)
var _ = g.Describe("[sig-node] NODE initContainer policy,volume,readines,quota", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("node-"+getRandomString(), exutil.KubeConfigPath())
buildPruningBaseDir = exutil.FixturePath("testdata", "node")
customTemp = filepath.Join(buildPruningBaseDir, "pod-modify.yaml")
podTerminationTemp = filepath.Join(buildPruningBaseDir, "pod-termination.yaml")
podInitConTemp = filepath.Join(buildPruningBaseDir, "pod-initContainer.yaml")
podSigstoreTemp = filepath.Join(buildPruningBaseDir, "pod-sigStore.yaml")
podSleepTemp = filepath.Join(buildPruningBaseDir, "sleepPod46306.yaml")
kubeletConfigTemp = filepath.Join(buildPruningBaseDir, "kubeletconfig-hardeviction.yaml")
memHogTemp = filepath.Join(buildPruningBaseDir, "mem-hog-ocp11600.yaml")
podTwoContainersTemp = filepath.Join(buildPruningBaseDir, "pod-with-two-containers.yaml")
podUserNSTemp = filepath.Join(buildPruningBaseDir, "pod-user-namespace.yaml")
ctrcfgOverlayTemp = filepath.Join(buildPruningBaseDir, "containerRuntimeConfig-overlay.yaml")
podHelloTemp = filepath.Join(buildPruningBaseDir, "pod-hello.yaml")
podWkloadCpuTemp = filepath.Join(buildPruningBaseDir, "pod-workload-cpu.yaml")
podWkloadCpuNoAnTemp = filepath.Join(buildPruningBaseDir, "pod-workload-cpu-without-anotation.yaml")
podNoWkloadCpuTemp = filepath.Join(buildPruningBaseDir, "pod-without-workload-cpu.yaml")
runtimeTimeoutTemp = filepath.Join(buildPruningBaseDir, "kubeletconfig-runReqTout.yaml")
upgradeMachineConfigTemp1 = filepath.Join(buildPruningBaseDir, "custom-kubelet-test1.yaml")
upgradeMachineConfigTemp2 = filepath.Join(buildPruningBaseDir, "custom-kubelet-test2.yaml")
systemreserveTemp = filepath.Join(buildPruningBaseDir, "kubeletconfig-defaultsysres.yaml")
podLogLinkTemp = filepath.Join(buildPruningBaseDir, "pod-loglink.yaml")
livenessProbeTemp = filepath.Join(buildPruningBaseDir, "livenessProbe-terminationPeriod.yaml")
podWASMTemp = filepath.Join(buildPruningBaseDir, "pod-wasm.yaml")
podDisruptionBudgetTemp = filepath.Join(buildPruningBaseDir, "pod-disruption-budget.yaml")
genericDeploymentTemp = filepath.Join(buildPruningBaseDir, "generic-deployment.yaml")
podDevFuseTemp = filepath.Join(buildPruningBaseDir, "pod-dev-fuse.yaml")
podCpuLoadBalanceTemp = filepath.Join(buildPruningBaseDir, "pod-cpu-load-balance.yaml")
ImageconfigContTemp = filepath.Join(buildPruningBaseDir, "image-config.json")
ImgConfCont = ImgConfigContDescription{
name: "",
template: ImageconfigContTemp,
}
podDevFuse70987 = podDevFuseDescription{
name: "",
namespace: "",
template: podDevFuseTemp,
}
podLogLink65404 = podLogLinkDescription{
name: "",
namespace: "",
template: podLogLinkTemp,
}
podWkloadCpu52313 = podNoWkloadCpuDescription{
name: "",
namespace: "",
template: podNoWkloadCpuTemp,
}
podWkloadCpu52326 = podWkloadCpuDescription{
name: "",
namespace: "",
workloadcpu: "",
template: podWkloadCpuTemp,
}
podWkloadCpu52328 = podWkloadCpuDescription{
name: "",
namespace: "",
workloadcpu: "",
template: podWkloadCpuTemp,
}
podWkloadCpu52329 = podWkloadCpuNoAnotation{
name: "",
namespace: "",
workloadcpu: "",
template: podWkloadCpuNoAnTemp,
}
podHello = podHelloDescription{
name: "",
namespace: "",
template: podHelloTemp,
}
podUserNS47663 = podUserNSDescription{
name: "",
namespace: "",
template: podUserNSTemp,
}
podModify = podModifyDescription{
name: "",
namespace: "",
mountpath: "",
command: "",
args: "",
restartPolicy: "",
user: "",
role: "",
level: "",
template: customTemp,
}
podTermination = podTerminationDescription{
name: "",
namespace: "",
template: podTerminationTemp,
}
podInitCon38271 = podInitConDescription{
name: "",
namespace: "",
template: podInitConTemp,
}
podSigstore73667 = podSigstoreDescription{
name: "",
namespace: "",
template: podSigstoreTemp,
}
podSleep = podSleepDescription{
namespace: "",
template: podSleepTemp,
}
kubeletConfig = kubeletConfigDescription{
name: "",
labelkey: "",
labelvalue: "",
template: kubeletConfigTemp,
}
memHog = memHogDescription{
name: "",
namespace: "",
labelkey: "",
labelvalue: "",
template: memHogTemp,
}
podTwoContainers = podTwoContainersDescription{
name: "",
namespace: "",
template: podTwoContainersTemp,
}
ctrcfgOverlay = ctrcfgOverlayDescription{
name: "",
overlay: "",
template: ctrcfgOverlayTemp,
}
runtimeTimeout = runtimeTimeoutDescription{
name: "",
labelkey: "",
labelvalue: "",
template: runtimeTimeoutTemp,
}
upgradeMachineconfig1 = upgradeMachineconfig1Description{
name: "",
template: upgradeMachineConfigTemp1,
}
upgradeMachineconfig2 = upgradeMachineconfig2Description{
name: "",
template: upgradeMachineConfigTemp2,
}
systemReserveES = systemReserveESDescription{
name: "",
labelkey: "",
labelvalue: "",
template: systemreserveTemp,
}
)
// author: [email protected]
g.It("DEPRECATED-Author:pmali-High-12893-Init containers with restart policy Always", func() {
oc.SetupProject()
podModify.name = "init-always-fail"
podModify.namespace = oc.Namespace()
podModify.mountpath = "/init-test"
podModify.command = "/bin/bash"
podModify.args = "exit 1"
podModify.restartPolicy = "Always"
g.By("create FAILED init container with pod restartPolicy Always")
podModify.create(oc)
g.By("Check pod failure reason")
err := podStatusReason(oc)
exutil.AssertWaitPollNoErr(err, "pod status does not contain CrashLoopBackOff")
g.By("Delete Pod ")
podModify.delete(oc)
g.By("create SUCCESSFUL init container with pod restartPolicy Always")
podModify.name = "init-always-succ"
podModify.namespace = oc.Namespace()
podModify.mountpath = "/init-test"
podModify.command = "/bin/bash"
podModify.args = "sleep 30"
podModify.restartPolicy = "Always"
podModify.create(oc)
g.By("Check pod Status")
err = podStatus(oc, podModify.namespace, podModify.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
g.By("Delete Pod")
podModify.delete(oc)
})
// author: [email protected]
g.It("DEPRECATED-Author:pmali-High-12894-Init containers with restart policy OnFailure", func() {
oc.SetupProject()
podModify.name = "init-onfailure-fail"
podModify.namespace = oc.Namespace()
podModify.mountpath = "/init-test"
podModify.command = "/bin/bash"
podModify.args = "exit 1"
podModify.restartPolicy = "OnFailure"
g.By("create FAILED init container with pod restartPolicy OnFailure")
podModify.create(oc)
g.By("Check pod failure reason")
err := podStatusReason(oc)
exutil.AssertWaitPollNoErr(err, "pod status does not contain CrashLoopBackOff")
g.By("Delete Pod ")
podModify.delete(oc)
g.By("create SUCCESSFUL init container with pod restartPolicy OnFailure")
podModify.name = "init-onfailure-succ"
podModify.namespace = oc.Namespace()
podModify.mountpath = "/init-test"
podModify.command = "/bin/bash"
podModify.args = "sleep 30"
podModify.restartPolicy = "OnFailure"
podModify.create(oc)
g.By("Check pod Status")
err = podStatus(oc, podModify.namespace, podModify.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
g.By("Delete Pod ")
podModify.delete(oc)
})
// author: [email protected]
g.It("DEPRECATED-Author:pmali-High-12896-Init containers with restart policy Never", func() {
oc.SetupProject()
podModify.name = "init-never-fail"
podModify.namespace = oc.Namespace()
podModify.mountpath = "/init-test"
podModify.command = "/bin/bash"
podModify.args = "exit 1"
podModify.restartPolicy = "Never"
g.By("create FAILED init container with pod restartPolicy Never")
podModify.create(oc)
g.By("Check pod failure reason")
err := podStatusterminatedReason(oc)
exutil.AssertWaitPollNoErr(err, "pod status does not contain Error")
g.By("Delete Pod ")
podModify.delete(oc)
g.By("create SUCCESSFUL init container with pod restartPolicy Never")
podModify.name = "init-never-succ"
podModify.namespace = oc.Namespace()
podModify.mountpath = "/init-test"
podModify.command = "/bin/bash"
podModify.args = "sleep 30"
podModify.restartPolicy = "Never"
podModify.create(oc)
g.By("Check pod Status")
err = podStatus(oc, podModify.namespace, podModify.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
g.By("Delete Pod ")
podModify.delete(oc)
})
// author: [email protected]
g.It("DEPRECATED-Author:pmali-High-12911-App container status depends on init containers exit code ", func() {
oc.SetupProject()
podModify.name = "init-fail"
podModify.namespace = oc.Namespace()
podModify.mountpath = "/init-test"
podModify.command = "/bin/false"
podModify.args = "sleep 30"
podModify.restartPolicy = "Never"
g.By("create FAILED init container with exit code and command /bin/false")
podModify.create(oc)
g.By("Check pod failure reason")
err := podStatusterminatedReason(oc)
exutil.AssertWaitPollNoErr(err, "pod status does not contain Error")
g.By("Delete Pod ")
podModify.delete(oc)
g.By("create SUCCESSFUL init container with command /bin/true")
podModify.name = "init-success"
podModify.namespace = oc.Namespace()
podModify.mountpath = "/init-test"
podModify.command = "/bin/true"
podModify.args = "sleep 30"
podModify.restartPolicy = "Never"
podModify.create(oc)
g.By("Check pod Status")
err = podStatus(oc, podModify.namespace, podModify.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
g.By("Delete Pod ")
podModify.delete(oc)
})
// author: [email protected]
g.It("DEPRECATED-Author:pmali-High-12913-Init containers with volume work fine", func() {
oc.SetupProject()
podModify.name = "init-volume"
podModify.namespace = oc.Namespace()
podModify.mountpath = "/init-test"
podModify.command = "/bin/bash"
podModify.args = "echo This is OCP volume test > /work-dir/volume-test"
podModify.restartPolicy = "Never"
g.By("Create a pod with initContainer using volume\n")
podModify.create(oc)
g.By("Check pod status")
err := podStatus(oc, podModify.namespace, podModify.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
g.By("Check Vol status\n")
err = volStatus(oc)
exutil.AssertWaitPollNoErr(err, "Init containers with volume do not work fine")
g.By("Delete Pod\n")
podModify.delete(oc)
})
// author: [email protected]
g.It("Author:pmali-Medium-30521-CRIO Termination Grace Period test", func() {
oc.SetupProject()
podTermination.name = "pod-termination"
podTermination.namespace = oc.Namespace()
g.By("Create a pod with termination grace period\n")
podTermination.create(oc)
g.By("Check pod status\n")
err := podStatus(oc, podTermination.namespace, podTermination.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
g.By("Check container TimeoutStopUSec\n")
err = podTermination.getTerminationGrace(oc)
exutil.AssertWaitPollNoErr(err, "terminationGracePeriodSeconds is not valid")
g.By("Delete Pod\n")
podTermination.delete(oc)
})
// author: [email protected]
g.It("Author:minmli-High-38271-Init containers should not restart when the exited init container is removed from node", func() {
g.By("Test for case OCP-38271")
oc.SetupProject()
podInitCon38271.name = "initcon-pod"
podInitCon38271.namespace = oc.Namespace()
g.By("Create a pod with init container")
podInitCon38271.create(oc)
defer podInitCon38271.delete(oc)
g.By("Check pod status")
err := podStatus(oc, podInitCon38271.namespace, podInitCon38271.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
g.By("Check init container exit normally")
err = podInitCon38271.containerExit(oc)
exutil.AssertWaitPollNoErr(err, "conainer not exit normally")
g.By("Delete init container")
_, err = podInitCon38271.deleteInitContainer(oc)
exutil.AssertWaitPollNoErr(err, "fail to delete container")
g.By("Check init container not restart again")
err = podInitCon38271.initContainerNotRestart(oc)
exutil.AssertWaitPollNoErr(err, "init container restart")
})
// author: [email protected]
g.It("Author:schoudha-High-70987-Allow dev fuse by default in CRI-O", func() {
exutil.By("Test for case OCP-70987")
podDevFuse70987.name = "pod-devfuse"
podDevFuse70987.namespace = oc.Namespace()
defer podDevFuse70987.delete(oc)
exutil.By("Create a pod with dev fuse")
podDevFuse70987.create(oc)
exutil.By("Check pod status")
err := podStatus(oc, podDevFuse70987.namespace, podDevFuse70987.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
exutil.By("Check if dev fuse is mounted inside the pod")
err = checkDevFuseMount(oc, podDevFuse70987.namespace, podDevFuse70987.name)
exutil.AssertWaitPollNoErr(err, "dev fuse is not mounted inside pod")
})
// author: [email protected]
g.It("DEPRECATED-NonPreRelease-Longduration-Author:pmali-High-46306-Node should not becomes NotReady with error creating container storage layer not known[Disruptive][Slow]", func() {
oc.SetupProject()
podSleep.namespace = oc.Namespace()
g.By("Get Worker Node and Add label app=sleep\n")
workerNodeName := getSingleWorkerNode(oc)
addLabelToResource(oc, "app=sleep", workerNodeName, "nodes")
defer removeLabelFromNode(oc, "app-", workerNodeName, "nodes")
g.By("Create a 50 pods on the same node\n")
for i := 0; i < 50; i++ {
podSleep.create(oc)
}
g.By("Check pod status\n")
err := podStatus(oc, podModify.namespace, podModify.name)
exutil.AssertWaitPollNoErr(err, "pod is NOT running")
g.By("Delete project\n")
go podSleep.deleteProject(oc)
g.By("Reboot Worker node\n")
go rebootNode(oc, workerNodeName)
//g.By("****** Reboot Worker Node ****** ")
//exutil.DebugNodeWithChroot(oc, workerNodeName, "reboot")
//g.By("Check Nodes Status\n")
//err = checkNodeStatus(oc, workerNodeName)
//exutil.AssertWaitPollNoErr(err, "node is not ready")
g.By("Get Master node\n")
masterNode := getSingleMasterNode(oc)
g.By("Check Master Node Logs\n")
err = masterNodeLog(oc, masterNode)
exutil.AssertWaitPollNoErr(err, "Logs Found, Test Failed")
})
// author: [email protected]
g.It("DEPRECATED-Longduration-NonPreRelease-Author:pmali-Medium-11600-kubelet will evict pod immediately when met hard eviction threshold memory [Disruptive][Slow]", func() {
oc.SetupProject()
kubeletConfig.name = "kubeletconfig-ocp11600"
kubeletConfig.labelkey = "custom-kubelet-ocp11600"
kubeletConfig.labelvalue = "hard-eviction"
memHog.name = "mem-hog-ocp11600"
memHog.namespace = oc.Namespace()
memHog.labelkey = kubeletConfig.labelkey
memHog.labelvalue = kubeletConfig.labelvalue
g.By("Get Worker Node and Add label custom-kubelet-ocp11600=hard-eviction\n")
addLabelToResource(oc, "custom-kubelet-ocp11600=hard-eviction", "worker", "mcp")
defer removeLabelFromNode(oc, "custom-kubelet-ocp11600-", "worker", "mcp")
g.By("Create Kubelet config \n")
kubeletConfig.create(oc)
defer getmcpStatus(oc, "worker") // To check all the Nodes are in Ready State after deleteing kubeletconfig
defer cleanupObjectsClusterScope(oc, objectTableRefcscope{"kubeletconfig", "kubeletconfig-ocp11600"})
g.By("Make sure Worker mcp is Updated correctly\n")
err := getmcpStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "mcp is not updated")
g.By("Create a 10 pods on the same node\n")
for i := 0; i < 10; i++ {
memHog.create(oc)
}
defer cleanupObjectsClusterScope(oc, objectTableRefcscope{"ns", oc.Namespace()})
g.By("Check worker Node events\n")
workerNodeName := getSingleWorkerNode(oc)
err = getWorkerNodeDescribe(oc, workerNodeName)
exutil.AssertWaitPollNoErr(err, "Logs did not Found memory pressure, Test Failed")
})
// author: [email protected]
g.It("Author:weinliu-Critical-11055-/dev/shm can be automatically shared among all of a pod's containers", func() {
g.By("Test for case OCP-11055")
oc.SetupProject()
podTwoContainers.name = "pod-twocontainers"
podTwoContainers.namespace = oc.Namespace()
g.By("Create a pod with two containers")
podTwoContainers.create(oc)
defer podTwoContainers.delete(oc)
g.By("Check pod status")
err := podStatus(oc, podTwoContainers.namespace, podTwoContainers.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
g.By("Enter container 1 and write files")
_, err = exutil.RemoteShPodWithBashSpecifyContainer(oc, podTwoContainers.namespace, podTwoContainers.name, "hello-openshift", "echo 'written_from_container1' > /dev/shm/c1")
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Enter container 2 and check whether it can share container 1 shared files")
containerFile1, err := exutil.RemoteShPodWithBashSpecifyContainer(oc, podTwoContainers.namespace, podTwoContainers.name, "hello-openshift-fedora", "cat /dev/shm/c1")
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Container1 File Content is: %v", containerFile1)
o.Expect(containerFile1).To(o.Equal("written_from_container1"))
g.By("Enter container 2 and write files")
_, err = exutil.RemoteShPodWithBashSpecifyContainer(oc, podTwoContainers.namespace, podTwoContainers.name, "hello-openshift-fedora", "echo 'written_from_container2' > /dev/shm/c2")
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Enter container 1 and check whether it can share container 2 shared files")
containerFile2, err := exutil.RemoteShPodWithBashSpecifyContainer(oc, podTwoContainers.namespace, podTwoContainers.name, "hello-openshift", "cat /dev/shm/c2")
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Container2 File Content is: %v", containerFile2)
o.Expect(containerFile2).To(o.Equal("written_from_container2"))
})
// author: [email protected]
g.It("DEPRECATED-Author:minmli-High-47663-run pods in user namespaces via crio workload annotation", func() {
oc.SetupProject()
g.By("Test for case OCP-47663")
podUserNS47663.name = "userns-47663"
podUserNS47663.namespace = oc.Namespace()
g.By("Check workload of openshift-builder exist in crio config")
err := podUserNS47663.crioWorkloadConfigExist(oc)
exutil.AssertWaitPollNoErr(err, "crio workload config not exist")
g.By("Check user containers exist in /etc/sub[ug]id")
err = podUserNS47663.userContainersExistForNS(oc)
exutil.AssertWaitPollNoErr(err, "user containers not exist for user namespace")
g.By("Create a pod with annotation of openshift-builder workload")
podUserNS47663.createPodUserNS(oc)
defer podUserNS47663.deletePodUserNS(oc)
g.By("Check pod status")
err = podStatus(oc, podUserNS47663.namespace, podUserNS47663.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
g.By("Check pod run in user namespace")
err = podUserNS47663.podRunInUserNS(oc)
exutil.AssertWaitPollNoErr(err, "pod not run in user namespace")
})
// author: [email protected]
g.It("NonHyperShiftHOST-NonPreRelease-Longduration-Author:minmli-High-52328-set workload resource usage from pod level : pod should not take effect if not defaulted or specified in workload [Disruptive][Slow]", func() {
oc.SetupProject()
exutil.By("Test for case OCP-52328")
exutil.By("Create a machine config for workload setting")
mcCpuOverride := filepath.Join(buildPruningBaseDir, "machineconfig-cpu-override-52328.yaml")
mcpName := "worker"
defer func() {
err := checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
}()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f=" + mcCpuOverride).Execute()
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + mcCpuOverride).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check mcp finish rolling out")
err = checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
exutil.By("Check workload setting is as expected")
wkloadConfig := []string{"crio.runtime.workloads.management", "activation_annotation = \"io.openshift.manager\"", "annotation_prefix = \"io.openshift.workload.manager\"", "crio.runtime.workloads.management.resources", "cpushares = 512"}
configPath := "/etc/crio/crio.conf.d/01-workload.conf"
err = configExist(oc, wkloadConfig, configPath)
exutil.AssertWaitPollNoErr(err, "workload setting is not set as expected")
exutil.By("Create a pod not specify cpuset in workload setting by annotation")
defer podWkloadCpu52328.delete(oc)
podWkloadCpu52328.name = "wkloadcpu-52328"
podWkloadCpu52328.namespace = oc.Namespace()
podWkloadCpu52328.workloadcpu = "{\"cpuset\": \"\", \"cpushares\": 1024}"
podWkloadCpu52328.create(oc)
exutil.By("Check pod status")
err = podStatus(oc, podWkloadCpu52328.namespace, podWkloadCpu52328.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
exutil.By("Check the pod only override cpushares")
cpuset := ""
err = overrideWkloadCpu(oc, cpuset, podWkloadCpu52328.namespace)
exutil.AssertWaitPollNoErr(err, "the pod not only override cpushares in workload setting")
})
// author: [email protected]
g.It("NonHyperShiftHOST-NonPreRelease-Longduration-Author:minmli-High-52313-High-52326-High-52329-set workload resource usage from pod level : pod can get configured to defaults and override defaults and pod should not be set if annotation not specified [Disruptive][Slow]", func() {
oc.SetupProject()
exutil.By("Test for case OCP-52313, OCP-52326 and OCP-52329")
exutil.By("Create a machine config for workload setting")
mcCpuOverride := filepath.Join(buildPruningBaseDir, "machineconfig-cpu-override.yaml")
defer func() {
mcpName := "worker"
err := checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
}()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f=" + mcCpuOverride).Execute()
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + mcCpuOverride).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check mcp finish rolling out")
mcpName := "worker"
err = checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
exutil.By("Check workload setting is as expected")
wkloadConfig := []string{"crio.runtime.workloads.management", "activation_annotation = \"io.openshift.manager\"", "annotation_prefix = \"io.openshift.workload.manager\"", "crio.runtime.workloads.management.resources", "cpushares = 512", "cpuset = \"0\""}
configPath := "/etc/crio/crio.conf.d/01-workload.conf"
err = configExist(oc, wkloadConfig, configPath)
exutil.AssertWaitPollNoErr(err, "workload setting is not set as expected")
exutil.By("Create a pod with default workload setting by annotation")
podWkloadCpu52313.name = "wkloadcpu-52313"
podWkloadCpu52313.namespace = oc.Namespace()
podWkloadCpu52313.create(oc)
exutil.By("Check pod status")
err = podStatus(oc, podWkloadCpu52313.namespace, podWkloadCpu52313.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
exutil.By("Check the pod get configured to default workload setting")
cpuset := "0"
err = overrideWkloadCpu(oc, cpuset, podWkloadCpu52313.namespace)
exutil.AssertWaitPollNoErr(err, "the pod is not configured to default workload setting")
podWkloadCpu52313.delete(oc)
exutil.By("Create a pod override the default workload setting by annotation")
podWkloadCpu52326.name = "wkloadcpu-52326"
podWkloadCpu52326.namespace = oc.Namespace()
podWkloadCpu52326.workloadcpu = "{\"cpuset\": \"0-1\", \"cpushares\": 200}"
podWkloadCpu52326.create(oc)
exutil.By("Check pod status")
err = podStatus(oc, podWkloadCpu52326.namespace, podWkloadCpu52326.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
exutil.By("Check the pod override the default workload setting")
cpuset = "0-1"
err = overrideWkloadCpu(oc, cpuset, podWkloadCpu52326.namespace)
exutil.AssertWaitPollNoErr(err, "the pod not override the default workload setting")
podWkloadCpu52326.delete(oc)
exutil.By("Create a pod without annotation but with prefix")
defer podWkloadCpu52329.delete(oc)
podWkloadCpu52329.name = "wkloadcpu-52329"
podWkloadCpu52329.namespace = oc.Namespace()
podWkloadCpu52329.workloadcpu = "{\"cpuset\": \"0-1\", \"cpushares\": 1800}"
podWkloadCpu52329.create(oc)
exutil.By("Check pod status")
err = podStatus(oc, podWkloadCpu52329.namespace, podWkloadCpu52329.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
exutil.By("Check the pod keep default workload setting")
cpuset = "0-1"
err = defaultWkloadCpu(oc, cpuset, podWkloadCpu52329.namespace)
exutil.AssertWaitPollNoErr(err, "the pod not keep efault workload setting")
})
// author: [email protected]
g.It("NonHyperShiftHOST-NonPreRelease-Longduration-Author:minmli-LEVEL0-High-46313-set overlaySize in containerRuntimeConfig should take effect in container [Disruptive][Slow]", func() {
oc.SetupProject()
g.By("Test for case OCP-46313")
ctrcfgOverlay.name = "ctrcfg-46313"
ctrcfgOverlay.overlay = "9G"
g.By("Create a containerRuntimeConfig to set overlaySize")
ctrcfgOverlay.create(oc)
defer func() {
g.By("Deleting configRuntimeConfig")
cleanupObjectsClusterScope(oc, objectTableRefcscope{"ContainerRuntimeConfig", "ctrcfg-46313"})
g.By("Check mcp finish rolling out")
err := getmcpStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "mcp is not updated")
}()
g.By("Check mcp finish rolling out")
err := getmcpStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "mcp is not updated")
g.By("Check overlaySize take effect in config file")
err = checkOverlaySize(oc, ctrcfgOverlay.overlay)
exutil.AssertWaitPollNoErr(err, "overlaySize not take effect")
g.By("Create a pod")
podTermination.name = "pod-46313"
podTermination.namespace = oc.Namespace()
podTermination.create(oc)
defer podTermination.delete(oc)
g.By("Check pod status")
err = podStatus(oc, podTermination.namespace, podTermination.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
g.By("Check in pod the root partition size for Overlay is correct.")
err = checkPodOverlaySize(oc, ctrcfgOverlay.overlay)
exutil.AssertWaitPollNoErr(err, "pod overlay size is not correct !!!")
})
g.It("Author:minmli-High-56266-kubelet/crio will delete netns when a pod is deleted", func() {
g.By("Test for case OCP-56266")
oc.SetupProject()
g.By("Create a pod")
podHello.name = "pod-56266"
podHello.namespace = oc.Namespace()
podHello.create(oc)
g.By("Check pod status")
err := podStatus(oc, podHello.namespace, podHello.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
g.By("Get Pod's Node name")
hostname := getPodNodeName(oc, podHello.namespace)
g.By("Get Pod's NetNS")
netNsPath, err := getPodNetNs(oc, hostname)
o.Expect(err).NotTo(o.HaveOccurred())
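// Assumption about the helper's output: getPodNetNs is expected to return the path of the pod's
// network namespace bind mount on the node (typically under /var/run/netns/ or /run/netns/);
// crio should remove that file once the pod is deleted, which is what checkNetNs verifies below.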
g.By("Delete the pod")
podHello.delete(oc)
g.By("Check the NetNs file was cleaned")
err = checkNetNs(oc, hostname, netNsPath)
exutil.AssertWaitPollNoErr(err, "the NetNs file is not cleaned !!!")
})
g.It("Author:minmli-High-55486-check not exist error MountVolume SetUp failed for volume serviceca object openshift-image-registry serviceca not registered", func() {
g.By("Test for case OCP-55486")
oc.SetupProject()
g.By("Check events of each cronjob")
err := checkEventsForErr(oc)
exutil.AssertWaitPollNoErr(err, "Found error: MountVolume.SetUp failed for volume ... not registered ")
})
//author: [email protected]
g.It("Author:asahay-Medium-55033-check KUBELET_LOG_LEVEL is 2", func() {
g.By("Test for OCP-55033")
g.By("check Kubelet Log Level\n")
assertKubeletLogLevel(oc)
})
//author: [email protected]
g.It("Author:asahay-NonHyperShiftHOST-NonPreRelease-Longduration-LEVEL0-High-52472-update runtimeRequestTimeout parameter using KubeletConfig CR [Disruptive][Slow]", func() {
oc.SetupProject()
runtimeTimeout.name = "kubeletconfig-52472"
runtimeTimeout.labelkey = "custom-kubelet"
runtimeTimeout.labelvalue = "test-timeout"
g.By("Label mcp worker custom-kubelet as test-timeout \n")
addLabelToResource(oc, "custom-kubelet=test-timeout", "worker", "mcp")
defer removeLabelFromNode(oc, "custom-kubelet-", "worker", "mcp")
g.By("Create KubeletConfig \n")
defer func() {
mcpName := "worker"
err := checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
}()
defer runtimeTimeout.delete(oc)
runtimeTimeout.create(oc)
g.By("Check mcp finish rolling out")
mcpName := "worker"
err := checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
g.By("Check Runtime Request Timeout")
runTimeTimeout(oc)
})
//author :[email protected]
g.It("Author:asahay-NonHyperShiftHOST-NonPreRelease-PreChkUpgrade-High-45436-Upgrading a cluster by making sure not keep duplicate machine config when it has multiple kubeletconfig [Disruptive][Slow]", func() {
upgradeMachineconfig1.name = "max-pod"
upgradeMachineconfig2.name = "max-pod-1"
g.By("Create first KubeletConfig \n")
upgradeMachineconfig1.create(oc)
g.By("Check mcp finish rolling out")
mcpName := "worker"
err := checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
g.By("Create second KubeletConfig \n")
upgradeMachineconfig2.create(oc)
g.By("Check mcp finish rolling out")
mcpName1 := "worker"
err1 := checkMachineConfigPoolStatus(oc, mcpName1)
exutil.AssertWaitPollNoErr(err1, "machineconfigpool worker update failed")
})
g.It("Author:asahay-NonHyperShiftHOST-NonPreRelease-PstChkUpgrade-High-45436-post check Upgrading a cluster by making sure not keep duplicate machine config when it has multiple kubeletconfig [Disruptive][Slow]", func() {
upgradeMachineconfig1.name = "max-pod"
defer func() {
g.By("Delete the KubeletConfig")
cleanupObjectsClusterScope(oc, objectTableRefcscope{"KubeletConfig", upgradeMachineconfig1.name})
g.By("Check mcp finish rolling out")
err := checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "mcp is not updated")
}()
upgradeMachineconfig2.name = "max-pod-1"
defer func() {
g.By("Delete the KubeletConfig")
cleanupObjectsClusterScope(oc, objectTableRefcscope{"KubeletConfig", upgradeMachineconfig2.name})
g.By("Check mcp finish rolling out")
err := checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "mcp is not updated")
}()
g.By("Checking no duplicate machine config")
checkUpgradeMachineConfig(oc)
})
//author: [email protected]
g.It("NonHyperShiftHOST-NonPreRelease-PreChkUpgrade-Author:minmli-High-45351-prepare to check crioConfig[Disruptive][Slow]", func() {
rhelWorkers, err := exutil.GetAllWorkerNodesByOSID(oc, "rhel")
o.Expect(err).NotTo(o.HaveOccurred())
if len(rhelWorkers) > 0 {
g.Skip("ctrcfg.overlay can't be supported by rhel nodes")
}
if exutil.IsSNOCluster(oc) || exutil.Is3MasterNoDedicatedWorkerNode(oc) {
g.Skip("Skipped: Skip test for SNO/Compact clusters")
}
g.By("1) oc debug one worker and edit /etc/crio/crio.conf")
// we update log_level = "debug" in /etc/crio/crio.conf
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
nodename := nodeList.Items[0].Name
_, err = exutil.DebugNodeWithChroot(oc, nodename, "/bin/bash", "-c", "sed -i 's/log_level = \"info\"/log_level = \"debug\"/g' /etc/crio/crio.conf")
o.Expect(err).NotTo(o.HaveOccurred())
g.By("2) create a ContainerRuntimeConfig to set overlaySize")
ctrcfgOverlay.name = "ctrcfg-45351"
ctrcfgOverlay.overlay = "35G"
mcpName := "worker"
ctrcfgOverlay.create(oc)
g.By("3) check mcp finish rolling out")
err = checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "mcp update failed")
g.By("4) check overlaySize update as expected")
err = checkOverlaySize(oc, ctrcfgOverlay.overlay)
exutil.AssertWaitPollNoErr(err, "overlaySize not update as expected")
})
//author: [email protected]
g.It("NonHyperShiftHOST-NonPreRelease-PstChkUpgrade-Author:minmli-High-45351-post check crioConfig[Disruptive][Slow]", func() {
rhelWorkers, err := exutil.GetAllWorkerNodesByOSID(oc, "rhel")
o.Expect(err).NotTo(o.HaveOccurred())
if len(rhelWorkers) > 0 {
g.Skip("ctrcfg.overlay can't be supported by rhel nodes")
}
if exutil.IsSNOCluster(oc) || exutil.Is3MasterNoDedicatedWorkerNode(oc) {
g.Skip("Skipped: Skip test for SNO/Compact clusters")
}
g.By("1) check overlaySize don't change after upgrade")
ctrcfgOverlay.name = "ctrcfg-45351"
ctrcfgOverlay.overlay = "35G"
defer func() {
g.By("Delete the configRuntimeConfig")
cleanupObjectsClusterScope(oc, objectTableRefcscope{"ContainerRuntimeConfig", ctrcfgOverlay.name})
g.By("Check mcp finish rolling out")
err := checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "mcp is not updated")
}()
defer func() {
g.By("Restore /etc/crio/crio.conf")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
for _, node := range nodeList.Items {
nodename := node.Name
_, err = exutil.DebugNodeWithChroot(oc, nodename, "/bin/bash", "-c", "sed -i 's/log_level = \"debug\"/log_level = \"info\"/g' /etc/crio/crio.conf")
o.Expect(err).NotTo(o.HaveOccurred())
}
}()
err = checkOverlaySize(oc, ctrcfgOverlay.overlay)
exutil.AssertWaitPollNoErr(err, "overlaySize change after upgrade")
g.By("2) check conmon value from crio config")
//we need to check every node for conmon = ""
checkConmonForAllNode(oc)
})
g.It("Author:asahay-Medium-57332-collecting the audit log with must gather", func() {
defer exec.Command("bash", "-c", "rm -rf /tmp/must-gather-57332").Output()
g.By("Running the must gather command \n")
_, err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("must-gather", "--dest-dir=/tmp/must-gather-57332", "--", "/usr/bin/gather_audit_logs").Output()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("check the must-gather result")
_, err = exec.Command("bash", "-c", "ls -l /tmp/must-gather-57332").Output()
o.Expect(err).NotTo(o.HaveOccurred())
})
g.It("Author:asahay-NonHyperShiftHOST-Longduration-NonPreRelease-High-44820-change container registry config [Serial][Slow]", func() {
ImgConfCont.name = "cluster"
expectedStatus1 := map[string]string{"Available": "True", "Progressing": "False", "Degraded": "False"}
exutil.By("Verifying Config Changes in Image Registry")
exutil.By("#. Copy and save existing CRD configuration in JSON format")
originImageConfigJSON, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("image.config", "cluster", "-o", "json").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
e2e.Logf("\n Original Image Configuration %v", originImageConfigJSON)
defer func() {
exutil.By("restore original ImageConfig")
createImageConfigWIthExportJSON(oc, originImageConfigJSON) // restore original yaml
exutil.By("Check mcp finish updating")
err := checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "Worker MCP is not updated")
err = checkMachineConfigPoolStatus(oc, "master")
exutil.AssertWaitPollNoErr(err, "Master MCP is not updated")
exutil.By("Check the openshift-apiserver operator status")
err = waitCoBecomes(oc, "openshift-apiserver", 480, expectedStatus1)
exutil.AssertWaitPollNoErr(err, "openshift-apiserver operator does not become available in 480 seconds")
exutil.By("Check the image-registry operator status")
err = waitCoBecomes(oc, "image-registry", 480, expectedStatus1)
exutil.AssertWaitPollNoErr(err, "image-registry operator does not become available in 480 seconds")
}()
checkImageConfigUpdatedAsExpected(oc)
})
g.It("NonHyperShiftHOST-NonPreRelease-Longduration-Author:minmli-High-57401-Create ImageDigestMirrorSet successfully [Disruptive][Slow]", func() {
//If a cluster contains any ICSP or IDMS, it will skip the case
if checkICSP(oc) || checkIDMS(oc) {
g.Skip("This cluster contain ICSP or IDMS, skip the test.")
}
exutil.By("Create an ImageDigestMirrorSet")
idms := filepath.Join(buildPruningBaseDir, "ImageDigestMirrorSet.yaml")
defer func() {
err := checkMachineConfigPoolStatus(oc, "master")
exutil.AssertWaitPollNoErr(err, "macineconfigpool master update failed")
err = checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
}()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f=" + idms).Execute()
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + idms).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check the mcp finish updating")
err = checkMachineConfigPoolStatus(oc, "master")
exutil.AssertWaitPollNoErr(err, "macineconfigpool master update failed")
err = checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
exutil.By("Check the ImageDigestMirrorSet apply to config")
err = checkRegistryForIdms(oc)
exutil.AssertWaitPollNoErr(err, "check registry config failed")
exutil.By("The ImageContentSourcePolicy can't exist wiht ImageDigestMirrorSet or ImageTagMirrorSet")
icsp := filepath.Join(buildPruningBaseDir, "ImageContentSourcePolicy.yaml")
out, _ := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", icsp).Output()
o.Expect(strings.Contains(out, "Kind.ImageContentSourcePolicy: Forbidden: can't create ImageContentSourcePolicy when ImageDigestMirrorSet resources exist")).To(o.BeTrue())
})
//author: [email protected]
g.It("NonHyperShiftHOST-Author:minmli-Medium-59552-Enable image signature verification for Red Hat Container Registries [Serial]", func() {
exutil.By("Check if mcp worker exist in current cluster")
machineCount, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", "worker", "-o=jsonpath={.status.machineCount}").Output()
if machineCount == "0" {
g.Skip("Skip for non-supported platform: mcp worker not exist!")
}
exutil.By("Apply a machine config to set image signature policy for worker nodes")
mcImgSig := filepath.Join(buildPruningBaseDir, "machineconfig-image-signature-59552.yaml")
mcpName := "worker"
defer func() {
err := checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
}()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f=" + mcImgSig).Execute()
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + mcImgSig).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check the mcp finish updating")
err = checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
exutil.By("Check the signature configuration policy.json")
err = checkImgSignature(oc)
exutil.AssertWaitPollNoErr(err, "check signature configuration failed")
})
g.It("Author:asahay-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-62746-A default SYSTEM_RESERVED_ES value is applied if it is empty [Disruptive][Slow]", func() {
exutil.By("set SYSTEM_RESERVED_ES as empty")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
nodename := nodeList.Items[0].Name
_, err = exutil.DebugNodeWithChroot(oc, nodename, "/bin/bash", "-c", "sed -i 's/SYSTEM_RESERVED_ES=1Gi/SYSTEM_RESERVED_ES=/g' /etc/crio/crio.conf")
o.Expect(err).NotTo(o.HaveOccurred())
systemReserveES.name = "kubeletconfig-62746"
systemReserveES.labelkey = "custom-kubelet"
systemReserveES.labelvalue = "reserve-space"
exutil.By("Label mcp worker custom-kubelet as reserve-space \n")
addLabelToResource(oc, "custom-kubelet=reserve-space", "worker", "mcp")
defer removeLabelFromNode(oc, "custom-kubelet-", "worker", "mcp")
exutil.By("Create KubeletConfig \n")
defer func() {
mcpName := "worker"
err := checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
}()
defer systemReserveES.delete(oc)
systemReserveES.create(oc)
exutil.By("Check mcp finish rolling out")
mcpName := "worker"
err = checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
exutil.By("Check Default value")
parameterCheck(oc)
})
//author: [email protected]
g.It("NonHyperShiftHOST-NonPreRelease-Longduration-Author:minmli-High-65404-log link inside pod via crio works well [Disruptive]", func() {
exutil.By("Apply a machine config to enable log link via crio")
mcLogLink := filepath.Join(buildPruningBaseDir, "machineconfig-log-link.yaml")
mcpName := "worker"
defer func() {
oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f=" + mcLogLink).Execute()
err := checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
}()
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + mcLogLink).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check the mcp finish updating")
err = checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
exutil.By("Check the crio config as expected")
logLinkConfig := []string{"crio.runtime.workloads.linked", "activation_annotation = \"io.kubernetes.cri-o.LinkLogs\"", "allowed_annotations = [ \"io.kubernetes.cri-o.LinkLogs\" ]"}
configPath := "/etc/crio/crio.conf.d/99-linked-log.conf"
err = configExist(oc, logLinkConfig, configPath)
exutil.AssertWaitPollNoErr(err, "crio config is not set as expected")
exutil.By("Create a pod with LinkLogs annotation")
podLogLink65404.name = "httpd"
podLogLink65404.namespace = oc.Namespace()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", oc.Namespace(), "security.openshift.io/scc.podSecurityLabelSync=false", "pod-security.kubernetes.io/enforce=privileged", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer podLogLink65404.delete(oc)
podLogLink65404.create(oc)
exutil.By("Check pod status")
err = podStatus(oc, podLogLink65404.namespace, podLogLink65404.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
exutil.By("Check log link successfully")
checkLogLink(oc, podLogLink65404.namespace)
})
g.It("NonHyperShiftHOST-NonPreRelease-Longduration-Author:minmli-High-55683-Crun on OpenShift enable [Disruptive]", func() {
exutil.By("Apply a ContarinerRuntimeConfig to enable crun")
ctrcfgCrun := filepath.Join(buildPruningBaseDir, "containerRuntimeConfig-crun.yaml")
mcpName := "worker"
defer func() {
oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f=" + ctrcfgCrun).Execute()
err := checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
}()
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + ctrcfgCrun).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check the mcp finish updating")
err = checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
exutil.By("Check crun is running")
checkCrun(oc)
})
g.It("Author:minmli-DEPRECATED-High-68184-container_network metrics should keep reporting after container restart", func() {
livenessProbeTermP68184 := liveProbeTermPeriod{
name: "liveness-probe",
namespace: oc.Namespace(),
terminationgrace: 60,
probeterminationgrace: 10,
template: livenessProbeTemp,
}
exutil.By("Create a pod")
defer livenessProbeTermP68184.delete(oc)
livenessProbeTermP68184.create(oc)
exutil.By("Check pod status")
err := podStatus(oc, livenessProbeTermP68184.namespace, livenessProbeTermP68184.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
exutil.By("Check the container_network* metrics report well")
podNode, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", livenessProbeTermP68184.name, "-o=jsonpath={.spec.nodeName}", "-n", livenessProbeTermP68184.namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("podNode is :%v", podNode)
var cmdOut1 []byte
var cmdOut2 []byte
waitErr := wait.Poll(10*time.Second, 70*time.Second, func() (bool, error) {
cmd1 := fmt.Sprintf(`oc get --raw /api/v1/nodes/%v/proxy/metrics/cadvisor | grep container_network_transmit | grep %v || true`, podNode, livenessProbeTermP68184.name)
// assign to the outer cmdOut1 (no :=) so the failure message below can report the last output
var cmdErr error
cmdOut1, cmdErr = exec.Command("bash", "-c", cmd1).Output()
o.Expect(cmdErr).NotTo(o.HaveOccurred())
if strings.Contains(string(cmdOut1), "container_network_transmit_bytes_total") && strings.Contains(string(cmdOut1), "container_network_transmit_errors_total") && strings.Contains(string(cmdOut1), "container_network_transmit_packets_dropped_total") && strings.Contains(string(cmdOut1), "container_network_transmit_packets_total") {
e2e.Logf("\ncontainer_network* metrics report well after pod start")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(waitErr, fmt.Sprintf("check metrics failed after pod start! Metric result is: \n %s \n", string(cmdOut1)))
exutil.By("Check the container_network* metrics still report after container restart")
waitErr = wait.Poll(80*time.Second, 5*time.Minute, func() (bool, error) {
restartCount, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", livenessProbeTermP68184.name, "-o=jsonpath={.status.containerStatuses[0].restartCount}", "-n", livenessProbeTermP68184.namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("restartCount is :%v", restartCount)
o.Expect(strconv.Atoi(restartCount)).Should(o.BeNumerically(">=", 1), "error: the pod restart time < 1")
cmd2 := fmt.Sprintf(`oc get --raw /api/v1/nodes/%v/proxy/metrics/cadvisor | grep container_network_transmit | grep %v || true`, podNode, livenessProbeTermP68184.name)
// same pattern as above: assign to the outer cmdOut2 so the failure message can report it
var cmdErr error
cmdOut2, cmdErr = exec.Command("bash", "-c", cmd2).Output()
o.Expect(cmdErr).NotTo(o.HaveOccurred())
if strings.Contains(string(cmdOut2), "container_network_transmit_bytes_total") && strings.Contains(string(cmdOut2), "container_network_transmit_errors_total") && strings.Contains(string(cmdOut2), "container_network_transmit_packets_dropped_total") && strings.Contains(string(cmdOut2), "container_network_transmit_packets_total") {
e2e.Logf("\ncontainer_network* metrics report well after pod restart")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(waitErr, fmt.Sprintf("check metrics failed after pod restart! Metric result is: \n %s \n", string(cmdOut2)))
})
//author: [email protected]
g.It("NonHyperShiftHOST-NonPreRelease-Longduration-Author:minmli-Medium-66398-Enable WASM workloads in OCP", func() {
podWASM66398 := podWASM{
name: "wasm-http",
namespace: oc.Namespace(),
template: podWASMTemp,
}
exutil.By("Apply a machineconfig to configure crun-wasm as the default runtime")
mcWASM := filepath.Join(buildPruningBaseDir, "machineconfig-wasm.yaml")
mcpName := "worker"
defer func() {
oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f=" + mcWASM).Execute()
err := checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
}()
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + mcWASM).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check the machine config pool finish updating")
err = checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
exutil.By("Verify the crun-wasm is configured as expected")
wasmConfig := []string{"crio.runtime", "default_runtime = \"crun-wasm\"", "crio.runtime.runtimes.crun-wasm", "runtime_path = \"/usr/bin/crun\"", "crio.runtime.runtimes.crun-wasm.platform_runtime_paths", "\"wasi/wasm32\" = \"/usr/bin/crun-wasm\""}
configPath := "/etc/crio/crio.conf.d/99-crun-wasm.conf"
err = configExist(oc, wasmConfig, configPath)
exutil.AssertWaitPollNoErr(err, "crun-wasm is not set as expected")
exutil.By("Check if wasm bits are enabled appropriately")
exutil.By("1)label namespace pod-security.kubernetes.io/enforce=baseline")
addLabelToResource(oc, "pod-security.kubernetes.io/enforce=baseline", oc.Namespace(), "namespace")
exutil.By("2)Create a pod")
defer podWASM66398.delete(oc)
podWASM66398.create(oc)
exutil.By("3)Check pod status")
err = podStatus(oc, podWASM66398.namespace, podWASM66398.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
exutil.By("4)Expose the pod as a service")
_, err = oc.AsAdmin().WithoutNamespace().Run("expose").Args("pod", podWASM66398.name, "-n", podWASM66398.namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("5)Expose the service as a route")
_, err = oc.AsAdmin().WithoutNamespace().Run("expose").Args("service", podWASM66398.name, "-n", podWASM66398.namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("6)Get the route name")
routeName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("route", podWASM66398.name, "-ojsonpath={.spec.host}", "-n", podWASM66398.namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("7)Curl the route name")
out, err := exec.Command("bash", "-c", "curl "+routeName+" -d \"Hello world!\"").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(string(out), "echo: Hello world!")).Should(o.BeTrue())
})
//author: [email protected]
//automates: https://issues.redhat.com/browse/OCPBUGS-15035
g.It("NonHyperShiftHOST-NonPreRelease-Author:jfrancoa-Medium-67564-node's drain should block when PodDisruptionBudget minAvailable equals 100 percentage and selector is empty [Disruptive]", func() {
exutil.By("Create a deployment with 6 replicas")
deploy := NewDeployment("hello-openshift", oc.Namespace(), "6", genericDeploymentTemp)
defer deploy.delete(oc)
deploy.create(oc)
deploy.waitForCreation(oc, 5)
exutil.By("Create PodDisruptionBudget")
pdb := NewPDB("my-pdb", oc.Namespace(), "100%", podDisruptionBudgetTemp)
defer pdb.delete(oc)
pdb.create(oc)
worker := getSingleWorkerNode(oc)
exutil.By(fmt.Sprintf("Obtain the pods running on node %v", worker))
podsInWorker, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("pods", "-n", oc.Namespace(), "-o=jsonpath={.items[?(@.spec.nodeName=='"+worker+"')].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(strings.Split(podsInWorker, " "))).Should(o.BeNumerically(">", 0))
// if the pdb's status is false and reason InsufficientPods
// means that it's not possible to drain a node keeping the
// required minimum availability, therefore the drain operation
// should block.
exutil.By("Make sure that PDB's status is False")
pdbStatus, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("poddisruptionbudget", "my-pdb", "-n", oc.Namespace(), "-o=jsonpath={.status.conditions[0].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(pdbStatus, "False")).Should(o.BeTrue())
exutil.By(fmt.Sprintf("Drain the node %v", worker))
defer waitClusterOperatorAvailable(oc)
defer oc.WithoutNamespace().AsAdmin().Run("adm").Args("uncordon", worker).Execute()
// Try to drain the node (it should fail) due to the 100%'s PDB minAvailability
// as the draining is impossible to happen, if we don't pass a timeout value this
// command will wait forever, as default timeout is 0s, which means infinite.
out, err := oc.WithoutNamespace().AsAdmin().Run("adm").Args("drain", worker, "--ignore-daemonsets", "--delete-emptydir-data", "--timeout=30s").Output()
o.Expect(err).To(o.HaveOccurred(), "Drain operation should have been blocked but it wasn't")
o.Expect(strings.Contains(out, "Cannot evict pod as it would violate the pod's disruption budget")).Should(o.BeTrue())
o.Expect(strings.Contains(out, "There are pending nodes to be drained")).Should(o.BeTrue())
exutil.By("Verify that the pods were not drained from the node")
podsAfterDrain, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("pods", "-n", oc.Namespace(), "-o=jsonpath={.items[?(@.spec.nodeName=='"+worker+"')].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(podsInWorker).Should(o.BeIdenticalTo(podsAfterDrain))
})
//author: [email protected]
g.It("NonHyperShiftHOST-NonPreRelease-Longduration-Author:minmli-High-70203-ICSP and IDMS/ITMS can coexist in cluster[Disruptive][Slow]", func() {
exutil.By("Check if any ICSP/IDMS/ITMS exist in the cluster")
//If a cluster contains any ICSP or IDMS or ITMS, it will skip the case
if checkICSPorIDMSorITMS(oc) {
g.Skip("This cluster contain ICSP or IDMS or ITMS, skip the test.")
}
exutil.By("1)Create an ICSP")
icsp := filepath.Join(buildPruningBaseDir, "ImageContentSourcePolicy-1.yaml")
defer func() {
err := checkMachineConfigPoolStatus(oc, "master")
exutil.AssertWaitPollNoErr(err, "macineconfigpool master update failed")
err = checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
}()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f=" + icsp).Execute()
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + icsp).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("2)Check the mcp finish updating")
err = checkMachineConfigPoolStatus(oc, "master")
exutil.AssertWaitPollNoErr(err, "macineconfigpool master update failed")
err = checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
exutil.By("3)Check the config file /etc/containers/registries.conf update as expected")
registryConfig := []string{"location = \"registry.access.redhat.com/ubi8/ubi-minimal\"", "location = \"example.io/example/ubi-minimal\"", "location = \"example.com/example/ubi-minimal\"", "location = \"registry.example.com/example\"", "location = \"mirror.example.net\""}
configPath := "/etc/containers/registries.conf"
err = configExist(oc, registryConfig, configPath)
exutil.AssertWaitPollNoErr(err, "registry config is not set as expected")
/*
//After OCPBUGS-27190 is fixed, will uncomment the code block
exutil.By("4)Create an IDMS with the same registry/mirror config as ICSP but with conflicting policy")
idms := filepath.Join(buildPruningBaseDir, "ImageDigestMirrorSet-conflict.yaml")
out, _ := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", idms).Output()
o.Expect(strings.Contains(out, "XXXXX")).To(o.BeTrue())
*/
exutil.By("5)Create an IDMS with the same registry/mirror config as ICSP")
idms1 := filepath.Join(buildPruningBaseDir, "ImageDigestMirrorSet-1.yaml")
defer func() {
err := checkMachineConfigPoolStatus(oc, "master")
exutil.AssertWaitPollNoErr(err, "macineconfigpool master update failed")
err = checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
}()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f=" + idms1).Execute()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + idms1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("6)Check the mcp doesn't get updated after idms created")
o.Consistently(func() bool {
worker_updated, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", "worker", "-n", oc.Namespace(), "-o=jsonpath={.status.conditions[?(@.type==\"Updated\")].status}").Output()
worker_updating, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", "worker", "-n", oc.Namespace(), "-o=jsonpath={.status.conditions[?(@.type==\"Updating\")].status}").Output()
master_updated, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", "master", "-n", oc.Namespace(), "-o=jsonpath={.status.conditions[?(@.type==\"Updated\")].status}").Output()
master_updating, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", "master", "-n", oc.Namespace(), "-o=jsonpath={.status.conditions[?(@.type==\"Updating\")].status}").Output()
return worker_updated == "True" && worker_updating == "False" && master_updated == "True" && master_updating == "False"
}).WithTimeout(60 * time.Second).WithPolling(5 * time.Second).Should(o.BeTrue())
exutil.By("7)Delete the ICSP")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f=" + icsp).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("8)Check the mcp doesn't get updated after icsp deleted")
o.Consistently(func() bool {
worker_updated, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", "worker", "-n", oc.Namespace(), "-o=jsonpath={.status.conditions[?(@.type==\"Updated\")].status}").Output()
worker_updating, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", "worker", "-n", oc.Namespace(), "-o=jsonpath={.status.conditions[?(@.type==\"Updating\")].status}").Output()
master_updated, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", "master", "-n", oc.Namespace(), "-o=jsonpath={.status.conditions[?(@.type==\"Updated\")].status}").Output()
master_updating, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", "master", "-n", oc.Namespace(), "-o=jsonpath={.status.conditions[?(@.type==\"Updating\")].status}").Output()
return worker_updated == "True" && worker_updating == "False" && master_updated == "True" && master_updating == "False"
}).WithTimeout(60 * time.Second).WithPolling(5 * time.Second).Should(o.BeTrue())
exutil.By("9)Check the config file /etc/containers/registries.conf keep the same")
registryConfig = []string{"location = \"registry.access.redhat.com/ubi8/ubi-minimal\"", "location = \"example.io/example/ubi-minimal\"", "location = \"example.com/example/ubi-minimal\"", "location = \"registry.example.com/example\"", "location = \"mirror.example.net\""}
configPath = "/etc/containers/registries.conf"
err = configExist(oc, registryConfig, configPath)
exutil.AssertWaitPollNoErr(err, "registry config is not set as expected")
exutil.By("10)Create an ITMS with different registry/mirror config from IDMS")
itms := filepath.Join(buildPruningBaseDir, "ImageTagMirrorSet.yaml")
defer func() {
err := checkMachineConfigPoolStatus(oc, "master")
exutil.AssertWaitPollNoErr(err, "macineconfigpool master update failed")
err = checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
}()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f=" + itms).Execute()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + itms).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("11)Check mcp finish updating")
err = checkMachineConfigPoolStatus(oc, "master")
exutil.AssertWaitPollNoErr(err, "macineconfigpool master update failed")
err = checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
exutil.By("12)Check the config file /etc/containers/registries.conf update as expected")
registryConfig = []string{"location = \"registry.access.redhat.com/ubi9/ubi-minimal\"", "location = \"registry.redhat.io\"", "location = \"mirror.example.com\""}
configPath = "/etc/containers/registries.conf"
err = configExist(oc, registryConfig, configPath)
exutil.AssertWaitPollNoErr(err, "registry config is not set as expected")
})
g.It("NonHyperShiftHOST-NonPreRelease-Longduration-Author:minmli-High-41897-Restricting CPUs for infra and application containers[Disruptive][Slow]", func() {
exutil.By("Check cpu core num on the node")
workerNodes := getWorkersList(oc)
cpu_num := getCpuNum(oc, workerNodes[0])
//This case can only run on a node with more than 4 cpu cores
if cpu_num <= 4 {
g.Skip("This cluster has less than 4 cpu cores, skip the test.")
}
exutil.By("Test for case OCP-41897")
cpuPerformanceprofile := filepath.Join(buildPruningBaseDir, "cpu-performanceprofile.yaml")
perfProfile41897 := cpuPerfProfile{
name: "performance-41897",
isolated: "",
template: cpuPerformanceprofile,
}
isolatedCpu := "0,5-" + strconv.Itoa(cpu_num-1)
perfProfile41897.isolated = isolatedCpu
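// With cpus 0 and 5..(cpu_num-1) isolated, the remaining cpus 1-4 become the reserved set,
// which is what checkReservedCpu asserts further down.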
exutil.By("1)Create a performanceProfile")
//when delete the performanceprofile, only mcp worker will update
defer func() {
err := checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
}()
defer perfProfile41897.delete(oc)
perfProfile41897.create(oc)
//for 4.14+, master and worker pool need update to change cgroup from v2 to v1, then worker pool update to apply performanceprofile
exutil.By("2)Check the mcp finish updating")
//if cgroup is v2, then mcp master and worker need update to change to v1 first
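// Assumption about the helper: getCgroupVersion reports the filesystem type backing
// /sys/fs/cgroup (e.g. `stat -fc %T /sys/fs/cgroup`), so "cgroup2fs" means cgroup v2.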
cgroupV := getCgroupVersion(oc)
if cgroupV == "cgroup2fs" {
err := checkMachineConfigPoolStatus(oc, "master")
exutil.AssertWaitPollNoErr(err, "macineconfigpool master update failed")
err = checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
}
// the kubeletconfig gets generated when the mcp worker updates to apply the performanceprofile
exutil.By("3)Check the kubeletconfig gets generated")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("kubeletconfig", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, perfProfile41897.name)).Should(o.BeTrue())
e2e.Logf("kubeletconfig exist: [%v], then check the mcp worker finish updating\n", output)
err = checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
exutil.By("4)Check the reserved cpu are as expected")
// 1) "reservedSystemCPUs": "1-4" from /etc/kubernetes/kubelet.conf
// 2) sh-5.1# pgrep systemd |while read i; do taskset -cp $i; done || results: pid 1's current affinity list: 1-4
//isolatedCpu := "0,5-" + strconv.Itoa(cpu_num-1)
reservedCpu := "1-4"
checkReservedCpu(oc, reservedCpu)
})
g.It("NonHyperShiftHOST-NonPreRelease-Longduration-Author:minmli-High-62985-Support disable cpu load balancing and cpu quota on RHEL 9 [Disruptive][Slow]", func() {
// in 4.16, it supports cgroupv2; in 4.15 and earlier, it only supports cgroupv1
exutil.By("Check cpu core num on the node")
workerNodes := getWorkersList(oc)
cpu_num := getCpuNum(oc, workerNodes[0])
//This case can only run on a node with more than 4 cpu cores
if cpu_num <= 4 {
g.Skip("This cluster has less than 4 cpu cores, skip the test.")
}
cpuPerformanceprofile := filepath.Join(buildPruningBaseDir, "cpu-performanceprofile.yaml")
perfProfile62985 := cpuPerfProfile{
name: "performance-62985",
isolated: "",
template: cpuPerformanceprofile,
}
isolatedCpu := "0,5-" + strconv.Itoa(cpu_num-1)
perfProfile62985.isolated = isolatedCpu
exutil.By("1)Create a performanceProfile")
defer func() {
perfProfile62985.delete(oc)
err := checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
}()
perfProfile62985.create(oc)
err := checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
exutil.By("2)Check the reserved cpu are as expected")
// 1) "reservedSystemCPUs": "1-4" from /etc/kubernetes/kubelet.conf
// 2) sh-5.1# pgrep systemd |while read i; do taskset -cp $i; done || results: pid 1's current affinity list: 1-4
reservedCpu := "1-4"
checkReservedCpu(oc, reservedCpu)
exutil.By("3)Turn on cpu info in dmesg log")
defer dmesgTurnOnCpu(oc, "1")
dmesgTurnOnCpu(oc, "0")
exutil.By("4)Create a pod with Guaranteed QoS, using at least a full CPU and load balance/cpu-quota disable annotation")
podCpuLoadBalance62985 := podCpuLoadBalance{
name: "cpu-load-balce-62985",
namespace: oc.Namespace(),
runtimeclass: "performance-performance-62985", //"performance-" + perfProfile62985.name
template: podCpuLoadBalanceTemp,
}
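// Assumption about the template: it sets runtimeClassName to the performance runtime class above
// and carries the crio annotations that disable load balancing and cpu quota for the pod's cpus,
// e.g. cpu-load-balancing.crio.io: "disable" and cpu-quota.crio.io: "disable".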
defer podCpuLoadBalance62985.delete(oc)
podCpuLoadBalance62985.create(oc)
exutil.By("5)Check pod Status")
err = podStatus(oc, podCpuLoadBalance62985.namespace, podCpuLoadBalance62985.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
exutil.By("6)Check the cpus are properly having load balance disabled")
checkCpuLoadBalanceDisabled(oc, podCpuLoadBalance62985.namespace, podCpuLoadBalance62985.name)
exutil.By("7)Check cpu-quota is disabled from container scope and pod cgroup correctly")
cgroupV := getCgroupVersion(oc)
checkCpuQuotaDisabled(oc, podCpuLoadBalance62985.namespace, podCpuLoadBalance62985.name, cgroupV)
})
//author: [email protected]
g.It("Author:minmli-NonHyperShiftHOST-NonPreRelease-Longduration-High-73667-High-73412-Crio verify the sigstore signature using default policy when pulling images [Disruptive][Slow]", func() {
exutil.By("1)Enable featureGate of TechPreviewNoUpgrade")
exutil.By("Check if exist any featureSet in featuregate cluster")
featureSet, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("featuregate", "cluster", "-o=jsonpath={.spec.featureSet}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("featureSet is: %s", featureSet)
if featureSet == "TechPreviewNoUpgrade" {
e2e.Logf("featureSet is TechPreviewNoUpgrade already, no need setting again!")
/*
//comment out the [featureSet == ""] branch to observe the execution of the tp profile in CI
} else if featureSet == "" {
_, err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("featuregate", "cluster", "-p", "{\"spec\": {\"featureSet\": \"TechPreviewNoUpgrade\"}}", "--type=merge").Output()
if err != nil {
e2e.Failf("Fail to enable TechPreviewNoUpgrade, error:%v", err)
}
exutil.By("check mcp master and worker finish updating")
err = checkMachineConfigPoolStatus(oc, "master")
exutil.AssertWaitPollNoErr(err, "macineconfigpool master update failed")
err = checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
} else {
g.Skip("featureSet is neither empty nor TechPreviewNoUpgrade,skip it!")
}
*/
} else {
g.Skip("featureSet is not TechPreviewNoUpgrade,skip it!")
}
exutil.By("2)Check the featureGate take effect")
//featureConfig := []string{"SignatureStores: true", "SigstoreImageVerification: true"} //4.17 be so
featureConfig := []string{"\"SignatureStores\": true", "\"SigstoreImageVerification\": true"} //4.16 be so
kubeletPath := "/etc/kubernetes/kubelet.conf"
err = configExist(oc, featureConfig, kubeletPath)
exutil.AssertWaitPollNoErr(err, "featureGate config check failed")
exutil.By("3)Set the crio loglevel [debug]")
ctrcfgLog := filepath.Join(buildPruningBaseDir, "containerRuntimeConfig_log_level.yaml")
mcpName := "worker"
defer func() {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f=" + ctrcfgLog).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
}()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + ctrcfgLog).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check mcp finish updating")
err = checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
exutil.By("Check the crio loglevel")
nodeName := getSingleWorkerNode(oc)
out, _ := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", "crio config | grep log_level")
o.Expect(strings.Contains(string(out), "log_level = \"debug\"")).Should(o.BeTrue())
exutil.By("4)Apply the ClusterImagePolicy manifest")
imgPolicy := filepath.Join(buildPruningBaseDir, "imagePolicy.yaml")
defer func() {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f=" + imgPolicy).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = checkMachineConfigPoolStatus(oc, "master")
exutil.AssertWaitPollNoErr(err, "macineconfigpool master update failed")
err = checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
}()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + imgPolicy).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check mcp finish updating")
err = checkMachineConfigPoolStatus(oc, "master")
exutil.AssertWaitPollNoErr(err, "macineconfigpool master update failed")
err = checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
exutil.By("5)Create a pod with an image containing sigstore signature")
podSigstore73667.name = "pod-73667-sig"
podSigstore73667.namespace = oc.Namespace()
defer podSigstore73667.delete(oc)
podSigstore73667.create(oc)
exutil.By("6)Check the pod status")
err = podStatus(oc, podSigstore73667.namespace, podSigstore73667.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
exutil.By("7)check the crio log about sigstore signature verification")
docker_ns := "docker.io"
image := "docker.io/lyman9966/rhel8"
checkSigstoreVerified(oc, podSigstore73667.namespace, podSigstore73667.name, image, docker_ns)
exutil.By("8)validate pulling an image not containing sigstore signature will fail")
nodeName = getSingleWorkerNode(oc)
out, _ = exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", "crictl pull docker.io/ocpqe/hello-pod:latest")
o.Expect(strings.Contains(string(out), "Source image rejected: A signature was required, but no signature exists")).Should(o.BeTrue())
})
//author: [email protected]
g.It("Author:minmli-NonHyperShiftHOST-NonPreRelease-Longduration-Critical-72080-Verify cpu affinity of container process matches with cpuset cgroup controller interface file cpuset.cpus [Disruptive][Slow]", func() {
//this case verifies 3 scenarios:
//1)verify burstable pods' affinity contains all online cpus
//2)when guaranteed pods are created (with integral cpus), the affinity of burstable pods is modified accordingly to remove any cpus used by the guaranteed pods
//3)after node reboot, burstable pods' affinity should contain all cpus excluding the cpus used by guaranteed pods
exutil.By("1)Label a specific worker node")
workerNodes := getWorkersList(oc)
var worker string
for i := 0; i < len(workerNodes); i++ {
readyStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", workerNodes[i], "-o=jsonpath={.status.conditions[?(@.reason=='KubeletReady')].status}").Output()
if readyStatus == "True" {
worker = workerNodes[i]
break
}
}
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("nodes", worker, "node-role.kubernetes.io/worker-affinity-tests-").Output()
_, err := oc.AsAdmin().WithoutNamespace().Run("label").Args("nodes", worker, "node-role.kubernetes.io/worker-affinity-tests=", "--overwrite").Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("2)Create a machine config pool for the specific worker")
mcpAffinity := filepath.Join(buildPruningBaseDir, "machineconfigpool-affinity.yaml")
defer func() {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f=" + mcpAffinity).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
}()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + mcpAffinity).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("2.1)Check the mcp finish updating")
mcpName := "worker-affinity-tests"
err = checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("macineconfigpool %v update failed!", mcpName))
//the mcp worker also need updating after mcp worker-affinity-tests finish updating
err = checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("macineconfigpool worker update failed!"))
exutil.By("3)Create a kubeletconfig to enable cpumanager")
kubeconfigCpumager := filepath.Join(buildPruningBaseDir, "kubeletconfig-cpumanager.yaml")
defer func() {
oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f=" + kubeconfigCpumager).Execute()
err := checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("macineconfigpool %v update failed!", mcpName))
}()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + kubeconfigCpumager).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("3.1)Check the mcp finish updating")
err = checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("macineconfigpool %v update failed!", mcpName))
exutil.By("4)Check one running burstable pod that its cpu affinity include all online cpus")
//select one pod of ns openshift-cluster-node-tuning-operator which is running on the $worker node
burstPodName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", "openshift-cluster-node-tuning-operator", "--field-selector=spec.nodeName="+worker, "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
coreNum := getCpuNum(oc, worker)
burstNs := "openshift-cluster-node-tuning-operator"
checkCpuAffinityBurst(oc, burstPodName, burstNs, worker, coreNum, "false")
exutil.By("5)Create a guranteed pod with integral cpus")
podGuTemp := filepath.Join(buildPruningBaseDir, "pod-guaranteed.yaml")
podGu72080 := podGuDescription{
name: "gurantee-72080",
namespace: oc.Namespace(),
nodename: worker,
template: podGuTemp,
}
defer podGu72080.delete(oc)
podGu72080.create(oc)
exutil.By("5.1)Check the pod status")
err = podStatus(oc, podGu72080.namespace, podGu72080.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
exutil.By("5.2)Get cpu affinity of the guranteed pod")
gu_affinity := getCpuAffinityFromPod(oc, podGu72080.namespace, podGu72080.name)
exutil.By("6)Check the cpu affinity of burstable pod changed after creating the guranteed pod")
checkCpuAffinityBurst(oc, burstPodName, burstNs, worker, coreNum, gu_affinity)
exutil.By("7)Delete the guranteed pod")
podGu72080.delete(oc)
exutil.By("8)Check the cpu affinity of burstable pod revert after deleting the guranteed pod")
// there exist a bug currently, when deleting the pod, the cpu affinity of burstable pod can't revert in a short time
//checkCpuAffinityBurst(oc, burstPodName, burstNs, worker, coreNum, "false")
exutil.By("9)Create a deployment with guranteed pod with integral cpus")
deployGuTemp := filepath.Join(buildPruningBaseDir, "guaranteed-deployment.yaml")
deploy := NewDeploymentWithNode("guarantee-72080", oc.Namespace(), "1", worker, deployGuTemp)
defer deploy.delete(oc)
deploy.create(oc)
deploy.waitForCreation(oc, 5)
exutil.By("9.1)Get cpu affinity of the guranteed pod owned by the deployment")
guPodName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", oc.Namespace(), "--field-selector", "spec.nodeName="+worker, "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
gu_affinity = getCpuAffinityFromPod(oc, oc.Namespace(), guPodName)
exutil.By("10)Check the cpu affinity of burstable pod changed after creating the deployment")
checkCpuAffinityBurst(oc, burstPodName, burstNs, worker, coreNum, gu_affinity)
exutil.By("11)Reboot the node")
defer checkNodeStatus(oc, worker, "Ready")
rebootNode(oc, worker)
checkNodeStatus(oc, worker, "NotReady")
checkNodeStatus(oc, worker, "Ready")
exutil.By("12)Check the cpu affinity of burstable pod contain all cpus excluding the cpus used by guranteed pods")
deploy.waitForCreation(oc, 5)
guPodName, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", oc.Namespace(), "--field-selector", "spec.nodeName="+worker, "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
gu_affinity = getCpuAffinityFromPod(oc, oc.Namespace(), guPodName)
burstPodName, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", "openshift-cluster-node-tuning-operator", "--field-selector=spec.nodeName="+worker, "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
checkCpuAffinityBurst(oc, burstPodName, burstNs, worker, coreNum, gu_affinity)
})
//author: [email protected]
g.It("Author:asahay-High-78394-Make CRUN as Default Runtime for 4.18", func() {
exutil.By("1) Check Cluster Version")
clusterVersion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Cluster version: %s\n", clusterVersion)
var expectedRuntime string
if strings.Contains(clusterVersion, "4.18") {
expectedRuntime = "crun"
} else {
expectedRuntime = "runc"
}
exutil.By("2) Check all Nodes are Up and Default Runtime is crun")
defaultRuntimeCheck(oc, expectedRuntime)
})
g.It("Author:asahay-NonPreRelease-Longduration-High-78610-Default Runtime can be Updated to runc in 4.18[Serial]", func() {
exutil.By("1) Check Cluster Version")
clusterVersion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Cluster version: %s\n", clusterVersion)
exutil.By("2.1) Apply ContainerRuntimeConfig install manifest on Worker node to request defaultRuntime to runc ")
ContainerRuntimeConfigTemp1 := filepath.Join(buildPruningBaseDir, "ContainerRuntimeConfigWorker-78610.yaml")
defer func() {
err := oc.AsAdmin().Run("delete").Args("-f=" + ContainerRuntimeConfigTemp1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
mcpname1 := "worker"
err = checkMachineConfigPoolStatus(oc, mcpname1)
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
}()
err1 := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f=" + ContainerRuntimeConfigTemp1).Execute()
o.Expect(err1).NotTo(o.HaveOccurred())
exutil.By("2.2) Apply ContainerRuntimeConfig install manifest on Master node to request defaultRuntime to runc ")
ContainerRuntimeConfigTemp2 := filepath.Join(buildPruningBaseDir, "ContainerRuntimeConfigMaster-78610.yaml")
defer func() {
err := oc.AsAdmin().Run("delete").Args("-f=" + ContainerRuntimeConfigTemp2).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
mcpname2 := "master"
err = checkMachineConfigPoolStatus(oc, mcpname2)
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
}()
err2 := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f=" + ContainerRuntimeConfigTemp2).Execute()
o.Expect(err2).NotTo(o.HaveOccurred())
exutil.By("3) Wait for MCP to Finish Update")
exutil.By("Check mcp finish rolling out")
oc.NotShowInfo()
mcpName1 := "worker"
mcpName2 := "master"
err3 := checkMachineConfigPoolStatus(oc, mcpName1)
exutil.AssertWaitPollNoErr(err3, "machineconfigpool worker update failed")
err4 := checkMachineConfigPoolStatus(oc, mcpName2)
exutil.AssertWaitPollNoErr(err4, "machineconfigpool master update failed")
//for checking machine config pool
mcp, err5 := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp").Output()
o.Expect(err5).NotTo(o.HaveOccurred())
e2e.Logf("\n Machine config pools are:\n %s", mcp)
exutil.By("4) Check the Default Runtime Value")
UpdatedRuntimeCheck(oc, "runc")
})
})
var _ = g.Describe("[sig-node] NODE keda", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("keda-operator", exutil.KubeConfigPath())
cmaKedaControllerTemplate string
buildPruningBaseDir = exutil.FixturePath("testdata", "node")
sub subscriptionDescription
)
g.BeforeEach(func() {
// skip ARM64 arch
architecture.SkipNonAmd64SingleArch(oc)
buildPruningBaseDir := exutil.FixturePath("testdata", "node")
cmaKedaControllerTemplate = filepath.Join(buildPruningBaseDir, "cma-keda-controller-template.yaml")
sub.skipMissingCatalogsources(oc)
createKedaOperator(oc)
})
// author: [email protected]
g.It("Author:weinliu-LEVEL0-StagerunBoth-High-52383-Keda Install", func() {
g.By("CMA (Keda) operator has been installed successfully")
})
// author: [email protected]
g.It("Author:weinliu-StagerunBoth-High-62570-Verify must-gather tool works with CMA", func() {
var (
mustgatherName = "mustgather" + getRandomString()
mustgatherDir = "/tmp/" + mustgatherName
mustgatherLog = mustgatherName + ".log"
logFile string
)
g.By("Get the mustGatherImage")
mustGatherImage, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("packagemanifest", "-n=openshift-marketplace", "openshift-custom-metrics-autoscaler-operator", "-o=jsonpath={.status.channels[?(.name=='stable')].currentCSVDesc.annotations.containerImage}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Running the must gather command \n")
defer os.RemoveAll(mustgatherDir)
logFile, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("must-gather", "--dest-dir="+mustgatherDir, "--image="+mustGatherImage).Output()
if err != nil {
e2e.Logf("mustgather created from image %v in %v logged to %v,%v %v", mustGatherImage, mustgatherDir, mustgatherLog, logFile, err)
o.Expect(err).NotTo(o.HaveOccurred())
}
})
// author: [email protected]
g.It("Author:weinliu-High-60961-Audit logging test - stdout Metadata[Serial]", func() {
g.By("Create KedaController with log level Metadata")
g.By("Create CMA Keda Controller ")
cmaKedaController := cmaKedaControllerDescription{
level: "Metadata",
template: cmaKedaControllerTemplate,
name: "keda",
namespace: "openshift-keda",
}
defer cmaKedaController.delete(oc)
cmaKedaController.create(oc)
metricsApiserverPodName := getPodNameByLabel(oc, "openshift-keda", "app=keda-metrics-apiserver")
waitPodReady(oc, "openshift-keda", "app=keda-metrics-apiserver")
g.By("Check the Audit Logged as configed")
log, err := exutil.GetSpecificPodLogs(oc, "openshift-keda", "", metricsApiserverPodName[0], "")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(log, "\"level\":\"Metadata\"")).Should(o.BeTrue())
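// For reference, the metrics apiserver writes standard Kubernetes audit events as JSON lines, so the
// entry matched above should look roughly like the following (fields abbreviated, values illustrative):
// {"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Metadata","stage":"ResponseComplete","verb":"get",...}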
})
g.It("Author:asahay-High-60962-Audit logging test - stdout Request[Serial]", func() {
g.By("Create KedaController with log level Request")
g.By("Create CMA Keda Controller ")
cmaKedaController := cmaKedaControllerDescription{
level: "Request",
template: cmaKedaControllerTemplate,
name: "keda",
namespace: "openshift-keda",
}
defer cmaKedaController.delete(oc)
cmaKedaController.create(oc)
metricsApiserverPodName := getPodNameByLabel(oc, "openshift-keda", "app=keda-metrics-apiserver")
waitPodReady(oc, "openshift-keda", "app=keda-metrics-apiserver")
g.By("Check the Audit Logged as configed")
log, err := exutil.GetSpecificPodLogs(oc, "openshift-keda", "", metricsApiserverPodName[0], "")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(log, "\"level\":\"Request\"")).Should(o.BeTrue())
})
g.It("Author:asahay-High-60963-Audit logging test - stdout RequestResponse[Serial]", func() {
g.By("Create KedaController with log level RequestResponse")
g.By("Create CMA Keda Controller ")
cmaKedaController := cmaKedaControllerDescription{
level: "RequestResponse",
template: cmaKedaControllerTemplate,
name: "keda",
namespace: "openshift-keda",
}
defer cmaKedaController.delete(oc)
cmaKedaController.create(oc)
metricsApiserverPodName := getPodNameByLabel(oc, "openshift-keda", "app=keda-metrics-apiserver")
waitPodReady(oc, "openshift-keda", "app=keda-metrics-apiserver")
g.By("Check the Audit Logged as configed")
log, err := exutil.GetSpecificPodLogs(oc, "openshift-keda", "", metricsApiserverPodName[0], "")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(log, "\"level\":\"RequestResponse\"")).Should(o.BeTrue())
})
//Author: [email protected]
g.It("Author:asahay-High-60964-Audit logging test - Writing to PVC [Serial]", func() {
exutil.By("1) Create a PVC")
pvc := filepath.Join(buildPruningBaseDir, "pvc-60964.yaml")
defer oc.AsAdmin().Run("delete").Args("-f="+pvc, "-n", "openshift-keda").Execute()
err := oc.AsAdmin().Run("create").Args("-f="+pvc, "-n", "openshift-keda").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("2) Create KedaController with log level Metdata")
exutil.By("Create CMA Keda Controller ")
pvcKedaControllerTemp := filepath.Join(buildPruningBaseDir, "pvcKedaControllerTemp-60964.yaml")
pvcKedaController := pvcKedaControllerDescription{
level: "Metadata",
template: pvcKedaControllerTemp,
name: "keda",
namespace: "openshift-keda",
watchNamespace: "openshift-keda",
}
defer pvcKedaController.delete(oc)
pvcKedaController.create(oc)
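// The template is expected to point the metrics apiserver audit log at the PVC created in step 1. A
// hedged sketch of the relevant KedaController fields (field names follow the Custom Metrics
// Autoscaler documentation and are assumptions here, not the template's literal content):
//
// spec:
//   metricsServer:
//     auditConfig:
//       logFormat: "json"
//       logOutputVolumeClaim: "<name of the PVC from pvc-60964.yaml>"
//       policy:
//         rules:
//         - level: Metadata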
metricsApiserverPodName := getPodNameByLabel(oc, "openshift-keda", "app=keda-metrics-apiserver")
waitPodReady(oc, "openshift-keda", "app=keda-metrics-apiserver")
var output string
exutil.By("3) Checking PVC creation")
output, err = oc.AsAdmin().Run("get").Args("pvc", "-n", "openshift-keda").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("PVC is %v", output)
exutil.By("4) Checking KEDA Controller")
errCheck := wait.Poll(10*time.Second, 180*time.Second, func() (bool, error) {
output, err := oc.AsAdmin().Run("get").Args("KedaController", "-n", "openshift-keda").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(output, "keda") {
e2e.Logf("Keda Controller has been created Successfully!")
return true, nil
}
return false, nil
})
e2e.Logf("Output is %s", output)
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("KedaController has not been created"))
exutil.By("5) Checking status of pods")
waitPodReady(oc, "openshift-keda", "app=keda-metrics-apiserver")
exutil.By("6) Verifying audit logs for 'Metadata'")
errCheck = wait.Poll(10*time.Second, 180*time.Second, func() (bool, error) {
auditOutput := ExecCommandOnPod(oc, metricsApiserverPodName[0], "openshift-keda", "tail $(ls -t /var/audit-policy/log*/log-out-pvc | head -1)")
if strings.Contains(auditOutput, "Metadata") {
e2e.Logf("Audit log contains 'Metadata ")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Audit Log does not contain Metadata"))
})
// author: [email protected]
g.It("Author:weinliu-Critical-52384-Automatically scaling pods based on Kafka Metrics[Serial][Slow]", func() {
var (
scaledObjectStatus string
)
exutil.By("Create a kedacontroller with default template")
kedaControllerDefault := filepath.Join(buildPruningBaseDir, "keda-controller-default.yaml")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", "openshift-keda", "KedaController", "keda").Execute()
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + kedaControllerDefault).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
kafaksNs := "kafka-52384"
defer deleteProject(oc, kafaksNs)
createProject(oc, kafaksNs)
// Create Kafka
exutil.By("Subscribe to AMQ operator")
defer removeAmqOperator(oc)
createAmqOperator(oc)
exutil.By("Test for case OCP-52384")
exutil.By(" 1) Create a Kafka instance")
kafka := filepath.Join(buildPruningBaseDir, "kafka-52384.yaml")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f=" + kafka).Execute()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + kafka).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("2) Create a Kafka topic")
kafkaTopic := filepath.Join(buildPruningBaseDir, "kafka-topic-52384.yaml")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f=" + kafkaTopic).Execute()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + kafkaTopic).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("3) Check if Kafka and Kafka topic are ready")
// Wait for Kafka and KafkaTopic to be ready
waitForKafkaReady(oc, "my-cluster", kafaksNs)
namespace := oc.Namespace()
exutil.By("4) Create a Kafka Consumer")
kafkaConsumerDeployment := filepath.Join(buildPruningBaseDir, "kafka-consumer-deployment-52384.yaml")
defer oc.AsAdmin().Run("delete").Args("-f="+kafkaConsumerDeployment, "-n", namespace).Execute()
err = oc.AsAdmin().Run("create").Args("-f="+kafkaConsumerDeployment, "-n", namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("5) Create a scaledobjectc")
kafkaScaledobject := filepath.Join(buildPruningBaseDir, "kafka-scaledobject-52384.yaml")
defer oc.AsAdmin().Run("delete").Args("-f="+kafkaScaledobject, "-n", namespace).Execute()
err = oc.AsAdmin().Run("create").Args("-f="+kafkaScaledobject, "-n", namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
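// The scaledobject fixture is not inlined here. A minimal KEDA Kafka trigger of the kind this test
// exercises usually looks like the sketch below; the bootstrap server, consumer group and lag
// threshold are illustrative assumptions (the topic name matches the health key checked below).
//
// triggers:
// - type: kafka
//   metadata:
//     bootstrapServers: my-cluster-kafka-bootstrap.kafka-52384.svc:9092
//     consumerGroup: my-group
//     topic: my-topic
//     lagThreshold: "10"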
exutil.By("5.1) Check ScaledObject is up")
err = wait.Poll(3*time.Second, 300*time.Second, func() (bool, error) {
scaledObjectStatus, _ = oc.AsAdmin().Run("get").Args("ScaledObject", "kafka-amqstreams-consumer-scaledobject", "-o=jsonpath={.status.health.s0-kafka-my-topic.status}", "-n", namespace).Output()
if scaledObjectStatus == "Happy" {
e2e.Logf("ScaledObject is up and working")
return true, nil
}
e2e.Logf("ScaledObject is not in working status, current status: %v", scaledObjectStatus)
return false, nil
})
exutil.AssertWaitPollNoErr(err, "scaling failed")
exutil.By("Kafka scaling is up and ready")
exutil.By("6)Create a Kafka load")
kafkaLoad := filepath.Join(buildPruningBaseDir, "kafka-load-52384.yaml")
defer oc.AsAdmin().Run("delete").Args("jobs", "--field-selector", "status.successful=1", "-n", namespace).Execute()
err = oc.AsAdmin().Run("create").Args("-f="+kafkaLoad, "-n", namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("6.1) Check ScaledObject is up")
err = wait.Poll(3*time.Second, 300*time.Second, func() (bool, error) {
scaledObjectStatus, _ = oc.AsAdmin().Run("get").Args("ScaledObject", "kafka-amqstreams-consumer-scaledobject", "-o=jsonpath={.status.health.s0-kafka-my-topic.status}", "-n", namespace).Output()
if scaledObjectStatus == "Happy" {
e2e.Logf("ScaledObject is up and working")
return true, nil
}
e2e.Logf("ScaledObject is not in working status, current status: %v", scaledObjectStatus)
return false, nil
})
exutil.AssertWaitPollNoErr(err, "scaling failed")
exutil.By("Kafka scaling is up and ready")
})
// author: [email protected]
g.It("Author:weinliu-ConnectedOnly-Critical-52385-Automatically scaling pods based on Prometheus metrics[Serial]", func() {
exutil.By("Create a kedacontroller with default template")
kedaControllerDefault := filepath.Join(buildPruningBaseDir, "keda-controller-default.yaml")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", "openshift-keda", "KedaController", "keda").Execute()
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + kedaControllerDefault).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
var scaledObjectStatus string
triggerAuthenticationTempl := filepath.Join(buildPruningBaseDir, "triggerauthentication-52385.yaml")
triggerAuthentication52385 := triggerAuthenticationDescription{
secretname: "",
namespace: "",
template: triggerAuthenticationTempl,
}
cmaNs := "cma-52385"
defer deleteProject(oc, cmaNs)
createProject(oc, cmaNs)
exutil.By("1) Create OpenShift monitoring for user-defined projects")
// Look for cluster-level monitoring configuration
getOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ConfigMap", "cluster-monitoring-config", "-n", "openshift-monitoring", "--ignore-not-found").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// Enable user workload monitoring
if len(getOutput) > 0 {
exutil.By("ConfigMap cluster-monitoring-config exists, extracting cluster-monitoring-config ...")
extractOutput, _, _ := oc.AsAdmin().WithoutNamespace().Run("extract").Args("ConfigMap/cluster-monitoring-config", "-n", "openshift-monitoring", "--to=-").Outputs()
//if strings.Contains(strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(extractOutput, "'", ""), "\"", ""), " ", ""), "enableUserWorkload:true") {
cleanedOutput := strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(extractOutput, "'", ""), "\"", ""), " ", "")
e2e.Logf("cleanedOutput is %s", cleanedOutput)
if matched, _ := regexp.MatchString("enableUserWorkload:\\s*true", cleanedOutput); matched {
exutil.By("User workload is enabled, doing nothing ... ")
} else {
exutil.By("User workload is not enabled, enabling ...")
exutil.By("Get current monitoring configuration to recover")
originclusterMonitoringConfig, getContentError := oc.AsAdmin().Run("get").Args("ConfigMap/cluster-monitoring-config", "-ojson", "-n", "openshift-monitoring").Output()
o.Expect(getContentError).NotTo(o.HaveOccurred())
originclusterMonitoringConfig, getContentError = sjson.Delete(originclusterMonitoringConfig, `metadata.resourceVersion`)
o.Expect(getContentError).NotTo(o.HaveOccurred())
originclusterMonitoringConfig, getContentError = sjson.Delete(originclusterMonitoringConfig, `metadata.uid`)
o.Expect(getContentError).NotTo(o.HaveOccurred())
originclusterMonitoringConfigFilePath := filepath.Join(e2e.TestContext.OutputDir, oc.Namespace()+"-52385.json")
o.Expect(os.WriteFile(originclusterMonitoringConfigFilePath, []byte(originclusterMonitoringConfig), 0644)).NotTo(o.HaveOccurred())
defer func() {
errReplace := oc.AsAdmin().WithoutNamespace().Run("replace").Args("-f", originclusterMonitoringConfigFilePath).Execute()
o.Expect(errReplace).NotTo(o.HaveOccurred())
}()
exutil.By("Deleting current monitoring configuration")
oc.WithoutNamespace().AsAdmin().Run("delete").Args("ConfigMap/cluster-monitoring-config", "-n", "openshift-monitoring").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create my monitoring configuration")
prometheusConfigmap := filepath.Join(buildPruningBaseDir, "prometheus-configmap.yaml")
_, err = oc.WithoutNamespace().AsAdmin().Run("create").Args("-f=" + prometheusConfigmap).Output()
o.Expect(err).NotTo(o.HaveOccurred())
}
} else {
e2e.Logf("ConfigMap cluster-monitoring-config does not exist, creating ...")
prometheusConfigmap := filepath.Join(buildPruningBaseDir, "prometheus-configmap.yaml")
defer func() {
errDelete := oc.WithoutNamespace().AsAdmin().Run("delete").Args("-f=" + prometheusConfigmap).Execute()
o.Expect(errDelete).NotTo(o.HaveOccurred())
}()
_, err = oc.WithoutNamespace().AsAdmin().Run("create").Args("-f=" + prometheusConfigmap).Output()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("2) Deploy application that exposes Prometheus metrics")
prometheusConsumer := filepath.Join(buildPruningBaseDir, "prometheus-comsumer-deployment.yaml")
defer oc.AsAdmin().Run("delete").Args("-f="+prometheusConsumer, "-n", cmaNs).Execute()
err = oc.AsAdmin().Run("create").Args("-f="+prometheusConsumer, "-n", cmaNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("2.1) Verify the deployment is available")
errCheck := wait.Poll(20*time.Second, 280*time.Second, func() (bool, error) {
output, err1 := oc.AsAdmin().Run("get").Args("deployment", "-n", cmaNs).Output()
o.Expect(err1).NotTo(o.HaveOccurred())
if strings.Contains(output, "test-app") && strings.Contains(output, "1/1") {
e2e.Logf("Deployment has been created Sucessfully!")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Depolyment has not been created"))
exutil.By("3) Create a Service Account")
defer oc.WithoutNamespace().AsAdmin().Run("delete").Args("sa", "thanos-52385", "-n", cmaNs).Execute()
err = oc.WithoutNamespace().AsAdmin().Run("create").Args("sa", "thanos-52385", "-n", cmaNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("3.1) Create Service Account Token")
servicetokenTemp := filepath.Join(buildPruningBaseDir, "servicetoken-52385.yaml")
token, err := oc.AsAdmin().SetNamespace(cmaNs).Run("apply").Args("-f", servicetokenTemp).Output()
e2e.Logf("err %v, token %v", err, token)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("3.2) Make sure the token is available")
serviceToken, err := oc.AsAdmin().Run("get").Args("secret", "thanos-token", "-n", cmaNs).Output()
e2e.Logf("err %v, token %v", err, serviceToken)
o.Expect(err).NotTo(o.HaveOccurred())
saTokenName := "thanos-token"
exutil.By("4) Define TriggerAuthentication with the Service Account's token")
triggerAuthentication52385.secretname = saTokenName
triggerAuthentication52385.namespace = cmaNs
defer oc.AsAdmin().Run("delete").Args("-n", cmaNs, "TriggerAuthentication", "keda-trigger-auth-prometheus").Execute()
triggerAuthentication52385.create(oc)
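// For reference, a TriggerAuthentication that hands the service account token to the Prometheus
// scaler is typically shaped as below; this is a sketch consistent with the secret name used above,
// not a copy of the template.
//
// apiVersion: keda.sh/v1alpha1
// kind: TriggerAuthentication
// metadata:
//   name: keda-trigger-auth-prometheus
// spec:
//   secretTargetRef:
//   - parameter: bearerToken
//     name: thanos-token
//     key: token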
exutil.By("4.1) Check TriggerAuthentication is Available")
triggerauth, err := oc.AsAdmin().Run("get").Args("TriggerAuthentication", "-n", cmaNs).Output()
e2e.Logf("Triggerauthentication is %v", triggerauth)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("5) Create a role for reading metric from Thanos")
role := filepath.Join(buildPruningBaseDir, "role.yaml")
defer oc.AsAdmin().Run("delete").Args("-f="+role, "-n", cmaNs).Execute()
err = oc.AsAdmin().Run("create").Args("-f="+role, "-n", cmaNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("5.1) Check Role is Available")
rolecheck, err := oc.AsAdmin().Run("get").Args("Role", "-n", cmaNs).Output()
e2e.Logf("Role %v", rolecheck)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("5.2) Add the role for reading metrics from Thanos to the Service Account")
rolebinding := filepath.Join(buildPruningBaseDir, "rolebinding-52385.yaml")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f="+rolebinding, "-n", cmaNs).Execute()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f="+rolebinding, "-n", cmaNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("6) Deploy ScaledObject to enable application autoscaling")
scaledobject := filepath.Join(buildPruningBaseDir, "scaledobject-52385.yaml")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f="+scaledobject, "-n", cmaNs).Execute()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f="+scaledobject, "-n", cmaNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
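// The fixture's exact trigger is not shown here. A Prometheus trigger that queries the in-cluster
// Thanos querier generally follows this shape; the serverAddress, query and threshold values are
// illustrative assumptions.
//
// triggers:
// - type: prometheus
//   metadata:
//     serverAddress: https://thanos-querier.openshift-monitoring.svc.cluster.local:9092
//     namespace: cma-52385
//     query: sum(rate(http_requests_total{job="test-app"}[1m]))
//     threshold: "5"
//     authModes: bearer
//   authenticationRef:
//     name: keda-trigger-auth-prometheus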
exutil.By("6.1) Check ScaledObject is up")
err = wait.Poll(3*time.Second, 100*time.Second, func() (bool, error) {
scaledObjectStatus, _ = oc.AsAdmin().Run("get").Args("ScaledObject", "prometheus-scaledobject", "-o=jsonpath={.status.health.s0-prometheus.status}", "-n", cmaNs).Output()
if scaledObjectStatus == "Happy" {
e2e.Logf("ScaledObject is up and working")
return true, nil
}
e2e.Logf("ScaledObject is not in working status, current status: %v", scaledObjectStatus)
return false, nil
})
exutil.AssertWaitPollNoErr(err, "scaling failed")
exutil.By("prometheus scaling is up and ready")
exutil.By("7) Generate requests to test the application autoscaling")
load := filepath.Join(buildPruningBaseDir, "load-52385.yaml")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f="+load, "-n", cmaNs).Execute()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f="+load, "-n", cmaNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("7.1) Check ScaledObject is up")
err = wait.Poll(3*time.Second, 100*time.Second, func() (bool, error) {
scaledObjectStatus, _ = oc.AsAdmin().Run("get").Args("ScaledObject", "prometheus-scaledobject", "-o=jsonpath={.status.health.s0-prometheus.status}", "-n", cmaNs).Output()
if scaledObjectStatus == "Happy" {
e2e.Logf("ScaledObject is up and working")
return true, nil
}
e2e.Logf("ScaledObject is not in working status, current status: %v", scaledObjectStatus)
return false, nil
})
exutil.AssertWaitPollNoErr(err, "scaling failed")
exutil.By("prometheus scaling is up and ready")
})
//author: [email protected]
g.It("Author:asahay-ConnectedOnly-Critical-73296-KEDA-Operator is missing files causing cron triggers with Timezone Failure [Serial]", func() {
exutil.By("Create a kedacontroller with default template")
kedaControllerDefault := filepath.Join(buildPruningBaseDir, "keda-controller-default.yaml")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", "openshift-keda", "KedaController", "keda").Execute()
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + kedaControllerDefault).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
triggerAuthenticationTempl := filepath.Join(buildPruningBaseDir, "triggerauthentication-73296.yaml")
triggerAuthentication73296 := triggerAuthenticationDescription{
secretname: "",
namespace: "",
template: triggerAuthenticationTempl,
}
cmaNs := "cma-73296"
defer deleteProject(oc, cmaNs)
createProject(oc, cmaNs)
exutil.By("1) Create OpenShift monitoring for user-defined projects")
// Look for cluster-level monitoring configuration
getOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ConfigMap", "cluster-monitoring-config", "-n", "openshift-monitoring", "--ignore-not-found").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// Enable user workload monitoring
if len(getOutput) > 0 {
exutil.By("ConfigMap cluster-monitoring-config exists, extracting cluster-monitoring-config ...")
extractOutput, _, _ := oc.AsAdmin().WithoutNamespace().Run("extract").Args("ConfigMap/cluster-monitoring-config", "-n", "openshift-monitoring", "--to=-").Outputs()
//if strings.Contains(strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(extractOutput, "'", ""), "\"", ""), " ", ""), "enableUserWorkload:true") {
cleanedOutput := strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(extractOutput, "'", ""), "\"", ""), " ", "")
e2e.Logf("cleanedOutput is %s", cleanedOutput)
if matched, _ := regexp.MatchString("enableUserWorkload:\\s*true", cleanedOutput); matched {
exutil.By("User workload is enabled, doing nothing ... ")
} else {
exutil.By("User workload is not enabled, enabling ...")
exutil.By("Get current monitoring configuration to recover")
originclusterMonitoringConfig, getContentError := oc.AsAdmin().Run("get").Args("ConfigMap/cluster-monitoring-config", "-ojson", "-n", "openshift-monitoring").Output()
o.Expect(getContentError).NotTo(o.HaveOccurred())
originclusterMonitoringConfig, getContentError = sjson.Delete(originclusterMonitoringConfig, `metadata.resourceVersion`)
o.Expect(getContentError).NotTo(o.HaveOccurred())
originclusterMonitoringConfig, getContentError = sjson.Delete(originclusterMonitoringConfig, `metadata.uid`)
o.Expect(getContentError).NotTo(o.HaveOccurred())
originclusterMonitoringConfigFilePath := filepath.Join(e2e.TestContext.OutputDir, oc.Namespace()+"-73296.json")
o.Expect(os.WriteFile(originclusterMonitoringConfigFilePath, []byte(originclusterMonitoringConfig), 0644)).NotTo(o.HaveOccurred())
defer func() {
errReplace := oc.AsAdmin().WithoutNamespace().Run("replace").Args("-f", originclusterMonitoringConfigFilePath).Execute()
o.Expect(errReplace).NotTo(o.HaveOccurred())
}()
exutil.By("Deleting current monitoring configuration")
oc.WithoutNamespace().AsAdmin().Run("delete").Args("ConfigMap/cluster-monitoring-config", "-n", "openshift-monitoring").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create my monitoring configuration")
prometheusConfigmap := filepath.Join(buildPruningBaseDir, "prometheus-configmap.yaml")
_, err = oc.WithoutNamespace().AsAdmin().Run("create").Args("-f=" + prometheusConfigmap).Output()
o.Expect(err).NotTo(o.HaveOccurred())
}
} else {
e2e.Logf("ConfigMap cluster-monitoring-config does not exist, creating ...")
prometheusConfigmap := filepath.Join(buildPruningBaseDir, "prometheus-configmap.yaml")
defer func() {
errDelete := oc.WithoutNamespace().AsAdmin().Run("delete").Args("-f=" + prometheusConfigmap).Execute()
o.Expect(errDelete).NotTo(o.HaveOccurred())
}()
_, err = oc.WithoutNamespace().AsAdmin().Run("create").Args("-f=" + prometheusConfigmap).Output()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("2) Deploy application that exposes Prometheus metrics")
prometheusConsumer := filepath.Join(buildPruningBaseDir, "prometheus-comsumer-deployment.yaml")
defer oc.AsAdmin().Run("delete").Args("-f="+prometheusConsumer, "-n", cmaNs).Execute()
err = oc.AsAdmin().Run("create").Args("-f="+prometheusConsumer, "-n", cmaNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("3) Create a Service Account")
defer oc.WithoutNamespace().AsAdmin().Run("delete").Args("sa", "thanos-73296", "-n", cmaNs).Execute()
err = oc.WithoutNamespace().AsAdmin().Run("create").Args("sa", "thanos-73296", "-n", cmaNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("3.1) Create Service Account Token")
servicetokenTemp := filepath.Join(buildPruningBaseDir, "servicetoken-73296.yaml")
token, err := oc.AsAdmin().SetNamespace(cmaNs).Run("apply").Args("-f", servicetokenTemp).Output()
e2e.Logf("err %v, token %v", err, token)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("3.2) Make sure the token is still there and didn't get deleted")
serviceToken, err := oc.AsAdmin().Run("get").Args("secret", "thanos-token", "-n", cmaNs).Output()
e2e.Logf("err %v, token %v", err, serviceToken)
o.Expect(err).NotTo(o.HaveOccurred())
saTokenName := "thanos-token"
exutil.By("3.3) Define TriggerAuthentication with the Service Account's token")
triggerAuthentication73296.secretname = saTokenName
triggerAuthentication73296.namespace = cmaNs
defer oc.AsAdmin().Run("delete").Args("-n", cmaNs, "TriggerAuthentication", "keda-trigger-auth-prometheus").Execute()
triggerAuthentication73296.create(oc)
exutil.By("4) Create a role for reading metric from Thanos")
role := filepath.Join(buildPruningBaseDir, "role.yaml")
defer oc.AsAdmin().Run("delete").Args("-f="+role, "-n", cmaNs).Execute()
err = oc.AsAdmin().Run("create").Args("-f="+role, "-n", cmaNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("5) Add the role for reading metrics from Thanos to the Service Account")
rolebinding := filepath.Join(buildPruningBaseDir, "rolebinding-73296.yaml")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f="+rolebinding, "-n", cmaNs).Execute()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f="+rolebinding, "-n", cmaNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("6) Create a Test Deployment")
testDeploymentTemp := filepath.Join(buildPruningBaseDir, "testdeployment-73296.yaml")
defer oc.AsAdmin().Run("delete").Args("-f="+testDeploymentTemp, "-n", cmaNs).Execute()
err = oc.AsAdmin().Run("create").Args("-f="+testDeploymentTemp, "-n", cmaNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("6.1) Verify the deployment is available")
errCheck := wait.Poll(20*time.Second, 280*time.Second, func() (bool, error) {
output, err1 := oc.AsAdmin().Run("get").Args("deployment", "-n", cmaNs).Output()
o.Expect(err1).NotTo(o.HaveOccurred())
if strings.Contains(output, "busybox") && strings.Contains(output, "1/1") {
e2e.Logf("Deployment has been created Sucessfully!")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Depolyment has not been created"))
exutil.By("7) Create a ScaledObject with a cron trigger with timezone applied.")
timezoneScaledObjectTemp := filepath.Join(buildPruningBaseDir, "timezonescaledobject-73296.yaml")
defer oc.AsAdmin().Run("delete").Args("-f="+timezoneScaledObjectTemp, "-n", cmaNs).Execute()
err = oc.AsAdmin().Run("create").Args("-f="+timezoneScaledObjectTemp, "-n", cmaNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
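// A cron trigger with a timezone, which this case exercises, normally looks like the sketch below;
// the schedule, timezone and replica count are illustrative assumptions rather than the fixture's
// values.
//
// triggers:
// - type: cron
//   metadata:
//     timezone: Asia/Kolkata
//     start: 0 6 * * *
//     end: 30 18 * * *
//     desiredReplicas: "3"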
exutil.By("7.1) Verifying the scaledobject readiness")
errCheck = wait.Poll(20*time.Second, 380*time.Second, func() (bool, error) {
output, err := oc.AsAdmin().Run("get").Args("scaledobject", "cron-scaledobject", "-n", cmaNs, "-o", "jsonpath={.status.conditions[?(@.status=='True')].status} {.status.conditions[?(@.type=='Ready')].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if output == "True True True" {
e2e.Logf("ScaledObject is Active and Running.")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("ScaledObject is not ready"))
PodName := getPodNameByLabel(oc, "openshift-keda", "app=keda-operator")
waitPodReady(oc, "openshift-keda", "app=keda-operator")
exutil.By(" 8) Check the Logs Containig INFO Reconciling ScaledObject")
log, err := exutil.GetSpecificPodLogs(oc, "openshift-keda", "", PodName[0], "")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(log, "INFO\tReconciling ScaledObject")).Should(o.BeTrue())
})
// author: [email protected]
g.It("Author:asahay-High-60966-CMA Scale applications based on memory metrics [Serial]", func() {
exutil.By("1) Create a kedacontroller with default template")
kedaControllerDefault := filepath.Join(buildPruningBaseDir, "keda-controller-default.yaml")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", "openshift-keda", "KedaController", "keda").Execute()
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + kedaControllerDefault).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
cmaNs := "cma-60966"
defer deleteProject(oc, cmaNs)
createProject(oc, cmaNs)
var output string
exutil.By("2) Creating a Keda HPA deployment")
kedaHPADemoDeploymentTemp := filepath.Join(buildPruningBaseDir, "keda-hpa-demo-deployment.yaml")
defer oc.AsAdmin().Run("delete").Args("-f="+kedaHPADemoDeploymentTemp, "-n", cmaNs).Execute()
err = oc.AsAdmin().Run("create").Args("-f="+kedaHPADemoDeploymentTemp, "-n", cmaNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("3) Verify the deployment is available")
errCheck := wait.Poll(20*time.Second, 280*time.Second, func() (bool, error) {
output, err1 := oc.AsAdmin().Run("get").Args("deployment", "-n", cmaNs).Output()
o.Expect(err1).NotTo(o.HaveOccurred())
if strings.Contains(output, "keda-hpa-demo-deployment") && strings.Contains(output, "1/1") {
e2e.Logf("Deployment has been created Sucessfully!")
return true, nil
}
return false, nil
})
e2e.Logf("Output: %v", output)
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Depolyment has not been created"))
exutil.By("4) Creating a ScaledObject")
memScaledObjectTemp := filepath.Join(buildPruningBaseDir, "mem-scaledobject.yaml")
defer oc.AsAdmin().Run("delete").Args("-f="+memScaledObjectTemp, "-n", cmaNs).Execute()
err = oc.AsAdmin().Run("create").Args("-f="+memScaledObjectTemp, "-n", cmaNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
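// The mem-scaledobject fixture is expected to use KEDA's built-in memory scaler; a minimal sketch
// (the utilization value is an illustrative assumption):
//
// triggers:
// - type: memory
//   metricType: Utilization
//   metadata:
//     value: "60"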
exutil.By("5) Verifying the scaledobject readiness")
errCheck = wait.Poll(20*time.Second, 380*time.Second, func() (bool, error) {
output, err := oc.AsAdmin().Run("get").Args("scaledobject", "mem-scaledobject", "-n", cmaNs, "-o", "jsonpath={.status.conditions[?(@.status=='True')].status} {.status.conditions[?(@.type=='Ready')].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if output == "True True True" {
e2e.Logf("ScaledObject is Active and Running.")
return true, nil
}
return false, nil
})
e2e.Logf("Output: %v", output)
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("ScaledObject is not ready"))
exutil.By("6) Checking HPA status using jsonpath")
errCheck = wait.Poll(20*time.Second, 380*time.Second, func() (bool, error) {
output, err = oc.AsAdmin().Run("get").Args("hpa", "keda-hpa-mem-scaledobject", "-n", cmaNs, "-o", "jsonpath={.spec.minReplicas} {.spec.maxReplicas}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// The lower limit for the number of replicas to which the autoscaler can scale down is 1 and the upper limit for the number of replicas to which the autoscaler can scale up is 10
if strings.Contains(output, "1 10") {
e2e.Logf("HPA is configured correctly as expected!")
return true, nil
}
return false, nil
})
e2e.Logf("Output: %v", output)
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("HPA status check failed"))
exutil.By("7) Describing HPA to verify conditions")
errCheck = wait.Poll(20*time.Second, 380*time.Second, func() (bool, error) {
output, err = oc.AsAdmin().Run("get").Args("hpa", "keda-hpa-mem-scaledobject", "-n", cmaNs, "-o", "jsonpath={.status.conditions[?(@.type=='AbleToScale')].status} {.status.conditions[?(@.type=='ScalingActive')].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if output == "True True" {
e2e.Logf("HPA conditions are as expected: AbleToScale is True, ScalingActive is True.")
return true, nil
}
return false, nil
})
e2e.Logf("Output: %v", output)
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("HPA conditions are not met"))
})
})
var _ = g.Describe("[sig-node] NODE VPA Vertical Pod Autoscaler", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("vpa-operator", exutil.KubeConfigPath())
buildPruningBaseDir = exutil.FixturePath("testdata", "node")
)
g.BeforeEach(func() {
exutil.SkipMissingQECatalogsource(oc)
createVpaOperator(oc)
})
// author: [email protected]
g.It("Author:weinliu-DEPRECATED-StagerunBoth-High-60991-VPA Install", func() {
g.By("VPA operator is installed successfully")
})
// author: [email protected]
g.It("Author:weinliu-High-70961-Allow cluster admins to specify VPA API client rates and memory-saver [Serial]", func() {
g.By("VPA operator is installed successfully")
exutil.By("Create a new VerticalPodAutoscalerController ")
vpaNs := "openshift-vertical-pod-autoscaler"
vpacontroller := filepath.Join(buildPruningBaseDir, "vpacontroller-70961.yaml")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f="+vpacontroller, "-n", vpaNs).Execute()
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f="+vpacontroller, "-n", vpaNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
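// The vpacontroller-70961.yaml fixture is not inlined; based on the assertions below it sets
// per-deployment argument overrides roughly as in this sketch (the apiVersion is an assumption):
//
// apiVersion: autoscaling.openshift.io/v1
// kind: VerticalPodAutoscalerController
// metadata:
//   name: vpa-70961
//   namespace: openshift-vertical-pod-autoscaler
// spec:
//   deploymentOverrides:
//     recommender:
//       container:
//         args: ["--kube-api-qps=20.0", "--kube-api-burst=60.0", "--memory-saver=true"]
//     admission:
//       container:
//         args: ["--kube-api-qps=30.0", "--kube-api-burst=40.0"]
//     updater:
//       container:
//         args: ["--kube-api-qps=20.0", "--kube-api-burst=80.0"]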
exutil.By("Check VPA operator's args")
recommenderArgs, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("VerticalPodAutoscalerController", "vpa-70961", "-n", "openshift-vertical-pod-autoscaler", "-o=jsonpath={.spec.deploymentOverrides.recommender.container.args}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect("[\"--kube-api-qps=20.0\",\"--kube-api-burst=60.0\",\"--memory-saver=true\"]").Should(o.Equal(recommenderArgs))
admissionArgs, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("VerticalPodAutoscalerController", "vpa-70961", "-n", "openshift-vertical-pod-autoscaler", "-o=jsonpath={.spec.deploymentOverrides.admission.container.args}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect("[\"--kube-api-qps=30.0\",\"--kube-api-burst=40.0\"]").Should(o.Equal(admissionArgs))
updaterArgs, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("VerticalPodAutoscalerController", "vpa-70961", "-n", "openshift-vertical-pod-autoscaler", "-o=jsonpath={.spec.deploymentOverrides.updater.container.args}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect("[\"--kube-api-qps=20.0\",\"--kube-api-burst=80.0\"]").Should(o.Equal(updaterArgs))
})
// author: [email protected]
g.It("Author:weinliu-High-70962-Allow cluster admins to specify CPU & Memory requests and limits of VPA controllers [Serial]", func() {
exutil.By("VPA operator is installed successfully")
exutil.By("Create a new VerticalPodAutoscalerController ")
vpaNs := "openshift-vertical-pod-autoscaler"
vpacontroller := filepath.Join(buildPruningBaseDir, "vpacontroller-70962.yaml")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f="+vpacontroller, "-n", vpaNs).Execute()
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f="+vpacontroller, "-n", vpaNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check VPA operator's args")
recommenderArgs, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("VerticalPodAutoscalerController", "vpa-70962", "-n", "openshift-vertical-pod-autoscaler", "-o=jsonpath={.spec.deploymentOverrides.recommender.container.resources.requests}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect("{\"cpu\":\"60m\",\"memory\":\"60Mi\"}").Should(o.Equal(recommenderArgs))
admissionArgs, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("VerticalPodAutoscalerController", "vpa-70962", "-n", "openshift-vertical-pod-autoscaler", "-o=jsonpath={.spec.deploymentOverrides.admission.container.resources.requests}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect("{\"cpu\":\"40m\",\"memory\":\"40Mi\"}").Should(o.Equal(admissionArgs))
updaterArgs, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("VerticalPodAutoscalerController", "vpa-70962", "-n", "openshift-vertical-pod-autoscaler", "-o=jsonpath={.spec.deploymentOverrides.updater.container.resources.requests}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect("{\"cpu\":\"80m\",\"memory\":\"80Mi\"}").Should(o.Equal(updaterArgs))
})
})
var _ = g.Describe("[sig-node] NODE Install and verify Cluster Resource Override Admission Webhook", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("clusterresourceoverride-operator", exutil.KubeConfigPath())
)
g.BeforeEach(func() {
g.By("Skip test when precondition not meet !!!")
exutil.SkipMissingQECatalogsource(oc)
installOperatorClusterresourceoverride(oc)
})
// author: [email protected]
g.It("Author:asahay-StagerunBoth-High-27070-Cluster Resource Override Operator. [Serial]", func() {
defer deleteAPIService(oc)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ClusterResourceOverride", "cluster", "-n", "clusterresourceoverride-operator").Execute()
createCRClusterresourceoverride(oc)
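// The helper applies the operator's ClusterResourceOverride CR (named "cluster"); its exact spec is
// defined elsewhere, but such a CR generally has the shape below, with the override percentages here
// being illustrative assumptions.
//
// apiVersion: operator.autoscaling.openshift.io/v1
// kind: ClusterResourceOverride
// metadata:
//   name: cluster
// spec:
//   podResourceOverride:
//     spec:
//       memoryRequestToLimitPercent: 50
//       cpuRequestToLimitPercent: 25
//       limitCPUToMemoryPercent: 200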
var err error
var croCR string
errCheck := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
croCR, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("ClusterResourceOverride", "cluster", "-n", "clusterresourceoverride-operator").Output()
if err != nil {
e2e.Logf("error %v, please try next round", err)
return false, nil
}
if !strings.Contains(croCR, "cluster") {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("can not get cluster with output %v, the error is %v", croCR, err))
e2e.Logf("Operator is installed successfully")
})
g.It("Author:asahay-Medium-27075-Testing the config changes. [Serial]", func() {
defer deleteAPIService(oc)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ClusterResourceOverride", "cluster").Execute()
createCRClusterresourceoverride(oc)
var err error
var croCR string
errCheck := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
croCR, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("ClusterResourceOverride", "cluster", "-n", "clusterresourceoverride-operator").Output()
if err != nil {
e2e.Logf("error %v, please try next round", err)
return false, nil
}
if !strings.Contains(croCR, "cluster") {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("can not get cluster with output %v, the error is %v", croCR, err))
e2e.Logf("Operator is installed successfully")
g.By("Testing the changes\n")
testCRClusterresourceoverride(oc)
})
})
|
package node
| ||||
test case
|
openshift/openshift-tests-private
|
eec8556e-2e39-4970-96e1-7ca335343205
|
DEPRECATED-Author:pmali-High-12893-Init containers with restart policy Always
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("DEPRECATED-Author:pmali-High-12893-Init containers with restart policy Always", func() {
oc.SetupProject()
podModify.name = "init-always-fail"
podModify.namespace = oc.Namespace()
podModify.mountpath = "/init-test"
podModify.command = "/bin/bash"
podModify.args = "exit 1"
podModify.restartPolicy = "Always"
g.By("create FAILED init container with pod restartPolicy Always")
podModify.create(oc)
g.By("Check pod failure reason")
err := podStatusReason(oc)
exutil.AssertWaitPollNoErr(err, "pod status does not contain CrashLoopBackOff")
g.By("Delete Pod ")
podModify.delete(oc)
g.By("create SUCCESSFUL init container with pod restartPolicy Always")
podModify.name = "init-always-succ"
podModify.namespace = oc.Namespace()
podModify.mountpath = "/init-test"
podModify.command = "/bin/bash"
podModify.args = "sleep 30"
podModify.restartPolicy = "Always"
podModify.create(oc)
g.By("Check pod Status")
err = podStatus(oc, podModify.namespace, podModify.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
g.By("Delete Pod")
podModify.delete(oc)
})
| ||||||
test case
|
openshift/openshift-tests-private
|
eb05a49a-abc5-4c0e-9452-8c2c5c3b0782
|
DEPRECATED-Author:pmali-High-12894-Init containers with restart policy OnFailure
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("DEPRECATED-Author:pmali-High-12894-Init containers with restart policy OnFailure", func() {
oc.SetupProject()
podModify.name = "init-onfailure-fail"
podModify.namespace = oc.Namespace()
podModify.mountpath = "/init-test"
podModify.command = "/bin/bash"
podModify.args = "exit 1"
podModify.restartPolicy = "OnFailure"
g.By("create FAILED init container with pod restartPolicy OnFailure")
podModify.create(oc)
g.By("Check pod failure reason")
err := podStatusReason(oc)
exutil.AssertWaitPollNoErr(err, "pod status does not contain CrashLoopBackOff")
g.By("Delete Pod ")
podModify.delete(oc)
g.By("create SUCCESSFUL init container with pod restartPolicy OnFailure")
podModify.name = "init-onfailure-succ"
podModify.namespace = oc.Namespace()
podModify.mountpath = "/init-test"
podModify.command = "/bin/bash"
podModify.args = "sleep 30"
podModify.restartPolicy = "OnFailure"
podModify.create(oc)
g.By("Check pod Status")
err = podStatus(oc, podModify.namespace, podModify.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
g.By("Delete Pod ")
podModify.delete(oc)
})
| ||||||
test case
|
openshift/openshift-tests-private
|
a2f1921f-8d33-4178-9ee6-224303073e5a
|
DEPRECATED-Author:pmali-High-12896-Init containers with restart policy Never
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("DEPRECATED-Author:pmali-High-12896-Init containers with restart policy Never", func() {
oc.SetupProject()
podModify.name = "init-never-fail"
podModify.namespace = oc.Namespace()
podModify.mountpath = "/init-test"
podModify.command = "/bin/bash"
podModify.args = "exit 1"
podModify.restartPolicy = "Never"
g.By("create FAILED init container with pod restartPolicy Never")
podModify.create(oc)
g.By("Check pod failure reason")
err := podStatusterminatedReason(oc)
exutil.AssertWaitPollNoErr(err, "pod status does not contain Error")
g.By("Delete Pod ")
podModify.delete(oc)
g.By("create SUCCESSFUL init container with pod restartPolicy Never")
podModify.name = "init-never-succ"
podModify.namespace = oc.Namespace()
podModify.mountpath = "/init-test"
podModify.command = "/bin/bash"
podModify.args = "sleep 30"
podModify.restartPolicy = "Never"
podModify.create(oc)
g.By("Check pod Status")
err = podStatus(oc, podModify.namespace, podModify.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
g.By("Delete Pod ")
podModify.delete(oc)
})
| ||||||
test case
|
openshift/openshift-tests-private
|
0945281b-f9fa-4b17-ab2e-e5607906563d
|
DEPRECATED-Author:pmali-High-12911-App container status depends on init containers exit code
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("DEPRECATED-Author:pmali-High-12911-App container status depends on init containers exit code ", func() {
oc.SetupProject()
podModify.name = "init-fail"
podModify.namespace = oc.Namespace()
podModify.mountpath = "/init-test"
podModify.command = "/bin/false"
podModify.args = "sleep 30"
podModify.restartPolicy = "Never"
g.By("create FAILED init container with exit code and command /bin/false")
podModify.create(oc)
g.By("Check pod failure reason")
err := podStatusterminatedReason(oc)
exutil.AssertWaitPollNoErr(err, "pod status does not contain Error")
g.By("Delete Pod ")
podModify.delete(oc)
g.By("create SUCCESSFUL init container with command /bin/true")
podModify.name = "init-success"
podModify.namespace = oc.Namespace()
podModify.mountpath = "/init-test"
podModify.command = "/bin/true"
podModify.args = "sleep 30"
podModify.restartPolicy = "Never"
podModify.create(oc)
g.By("Check pod Status")
err = podStatus(oc, podModify.namespace, podModify.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
g.By("Delete Pod ")
podModify.delete(oc)
})
| ||||||
test case
|
openshift/openshift-tests-private
|
7696046f-8750-480e-84bb-3cc06f8083fc
|
DEPRECATED-Author:pmali-High-12913-Init containers with volume work fine
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("DEPRECATED-Author:pmali-High-12913-Init containers with volume work fine", func() {
oc.SetupProject()
podModify.name = "init-volume"
podModify.namespace = oc.Namespace()
podModify.mountpath = "/init-test"
podModify.command = "/bin/bash"
podModify.args = "echo This is OCP volume test > /work-dir/volume-test"
podModify.restartPolicy = "Never"
g.By("Create a pod with initContainer using volume\n")
podModify.create(oc)
g.By("Check pod status")
err := podStatus(oc, podModify.namespace, podModify.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
g.By("Check Vol status\n")
err = volStatus(oc)
exutil.AssertWaitPollNoErr(err, "Init containers with volume do not work fine")
g.By("Delete Pod\n")
podModify.delete(oc)
})
| ||||||
test case
|
openshift/openshift-tests-private
|
4222905f-2804-48f3-a80f-8c5d99e3964b
|
Author:pmali-Medium-30521-CRIO Termination Grace Period test
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("Author:pmali-Medium-30521-CRIO Termination Grace Period test", func() {
oc.SetupProject()
podTermination.name = "pod-termination"
podTermination.namespace = oc.Namespace()
g.By("Create a pod with termination grace period\n")
podTermination.create(oc)
g.By("Check pod status\n")
err := podStatus(oc, podTermination.namespace, podTermination.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
g.By("Check container TimeoutStopUSec\n")
err = podTermination.getTerminationGrace(oc)
exutil.AssertWaitPollNoErr(err, "terminationGracePeriodSeconds is not valid")
g.By("Delete Pod\n")
podTermination.delete(oc)
})
| ||||||
test case
|
openshift/openshift-tests-private
|
b26aad92-f433-491f-b4cd-85cc61b1277c
|
Author:minmli-High-38271-Init containers should not restart when the exited init container is removed from node
|
['e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("Author:minmli-High-38271-Init containers should not restart when the exited init container is removed from node", func() {
g.By("Test for case OCP-38271")
oc.SetupProject()
podInitCon38271.name = "initcon-pod"
podInitCon38271.namespace = oc.Namespace()
g.By("Create a pod with init container")
podInitCon38271.create(oc)
defer podInitCon38271.delete(oc)
g.By("Check pod status")
err := podStatus(oc, podInitCon38271.namespace, podInitCon38271.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
g.By("Check init container exit normally")
err = podInitCon38271.containerExit(oc)
exutil.AssertWaitPollNoErr(err, "conainer not exit normally")
g.By("Delete init container")
_, err = podInitCon38271.deleteInitContainer(oc)
exutil.AssertWaitPollNoErr(err, "fail to delete container")
g.By("Check init container not restart again")
err = podInitCon38271.initContainerNotRestart(oc)
exutil.AssertWaitPollNoErr(err, "init container restart")
})
| |||||
test case
|
openshift/openshift-tests-private
|
cdc52b0d-480e-4f06-9065-0a32f6c23009
|
Author:schoudha-High-70987-Allow dev fuse by default in CRI-O
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("Author:schoudha-High-70987-Allow dev fuse by default in CRI-O", func() {
exutil.By("Test for case OCP-70987")
podDevFuse70987.name = "pod-devfuse"
podDevFuse70987.namespace = oc.Namespace()
defer podDevFuse70987.delete(oc)
exutil.By("Create a pod with dev fuse")
podDevFuse70987.create(oc)
exutil.By("Check pod status")
err := podStatus(oc, podDevFuse70987.namespace, podDevFuse70987.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
exutil.By("Check if dev fuse is mounted inside the pod")
err = checkDevFuseMount(oc, podDevFuse70987.namespace, podDevFuse70987.name)
exutil.AssertWaitPollNoErr(err, "dev fuse is not mounted inside pod")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
0581f998-32cb-433d-ad83-ca96dbffbabb
|
DEPRECATED-NonPreRelease-Longduration-Author:pmali-High-46306-Node should not becomes NotReady with error creating container storage layer not known[Disruptive][Slow]
|
['e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("DEPRECATED-NonPreRelease-Longduration-Author:pmali-High-46306-Node should not becomes NotReady with error creating container storage layer not known[Disruptive][Slow]", func() {
oc.SetupProject()
podSleep.namespace = oc.Namespace()
g.By("Get Worker Node and Add label app=sleep\n")
workerNodeName := getSingleWorkerNode(oc)
addLabelToResource(oc, "app=sleep", workerNodeName, "nodes")
defer removeLabelFromNode(oc, "app-", workerNodeName, "nodes")
g.By("Create a 50 pods on the same node\n")
for i := 0; i < 50; i++ {
podSleep.create(oc)
}
g.By("Check pod status\n")
err := podStatus(oc, podModify.namespace, podModify.name)
exutil.AssertWaitPollNoErr(err, "pod is NOT running")
g.By("Delete project\n")
go podSleep.deleteProject(oc)
g.By("Reboot Worker node\n")
go rebootNode(oc, workerNodeName)
//g.By("****** Reboot Worker Node ****** ")
//exutil.DebugNodeWithChroot(oc, workerNodeName, "reboot")
//g.By("Check Nodes Status\n")
//err = checkNodeStatus(oc, workerNodeName)
//exutil.AssertWaitPollNoErr(err, "node is not ready")
g.By("Get Master node\n")
masterNode := getSingleMasterNode(oc)
g.By("Check Master Node Logs\n")
err = masterNodeLog(oc, masterNode)
exutil.AssertWaitPollNoErr(err, "Logs Found, Test Failed")
})
| |||||
test case
|
openshift/openshift-tests-private
|
0400a31e-0cc3-4574-b3f3-db8f5721f289
|
DEPRECATED-Longduration-NonPreRelease-Author:pmali-Medium-11600-kubelet will evict pod immediately when met hard eviction threshold memory [Disruptive][Slow]
|
['e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("DEPRECATED-Longduration-NonPreRelease-Author:pmali-Medium-11600-kubelet will evict pod immediately when met hard eviction threshold memory [Disruptive][Slow]", func() {
oc.SetupProject()
kubeletConfig.name = "kubeletconfig-ocp11600"
kubeletConfig.labelkey = "custom-kubelet-ocp11600"
kubeletConfig.labelvalue = "hard-eviction"
memHog.name = "mem-hog-ocp11600"
memHog.namespace = oc.Namespace()
memHog.labelkey = kubeletConfig.labelkey
memHog.labelvalue = kubeletConfig.labelvalue
g.By("Get Worker Node and Add label custom-kubelet-ocp11600=hard-eviction\n")
addLabelToResource(oc, "custom-kubelet-ocp11600=hard-eviction", "worker", "mcp")
defer removeLabelFromNode(oc, "custom-kubelet-ocp11600-", "worker", "mcp")
g.By("Create Kubelet config \n")
kubeletConfig.create(oc)
defer getmcpStatus(oc, "worker") // To check all the Nodes are in Ready State after deleteing kubeletconfig
defer cleanupObjectsClusterScope(oc, objectTableRefcscope{"kubeletconfig", "kubeletconfig-ocp11600"})
g.By("Make sure Worker mcp is Updated correctly\n")
err := getmcpStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "mcp is not updated")
g.By("Create a 10 pods on the same node\n")
for i := 0; i < 10; i++ {
memHog.create(oc)
}
defer cleanupObjectsClusterScope(oc, objectTableRefcscope{"ns", oc.Namespace()})
g.By("Check worker Node events\n")
workerNodeName := getSingleWorkerNode(oc)
err = getWorkerNodeDescribe(oc, workerNodeName)
exutil.AssertWaitPollNoErr(err, "Logs did not Found memory pressure, Test Failed")
})
| |||||
test case
|
openshift/openshift-tests-private
|
574c33fa-930c-440a-8533-97fc9b1a04b7
|
Author:weinliu-Critical-11055-/dev/shm can be automatically shared among all of a pod's containers
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("Author:weinliu-Critical-11055-/dev/shm can be automatically shared among all of a pod's containers", func() {
g.By("Test for case OCP-11055")
oc.SetupProject()
podTwoContainers.name = "pod-twocontainers"
podTwoContainers.namespace = oc.Namespace()
g.By("Create a pod with two containers")
podTwoContainers.create(oc)
defer podTwoContainers.delete(oc)
g.By("Check pod status")
err := podStatus(oc, podTwoContainers.namespace, podTwoContainers.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
g.By("Enter container 1 and write files")
_, err = exutil.RemoteShPodWithBashSpecifyContainer(oc, podTwoContainers.namespace, podTwoContainers.name, "hello-openshift", "echo 'written_from_container1' > /dev/shm/c1")
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Enter container 2 and check whether it can share container 1 shared files")
containerFile1, err := exutil.RemoteShPodWithBashSpecifyContainer(oc, podTwoContainers.namespace, podTwoContainers.name, "hello-openshift-fedora", "cat /dev/shm/c1")
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Container1 File Content is: %v", containerFile1)
o.Expect(containerFile1).To(o.Equal("written_from_container1"))
g.By("Enter container 2 and write files")
_, err = exutil.RemoteShPodWithBashSpecifyContainer(oc, podTwoContainers.namespace, podTwoContainers.name, "hello-openshift-fedora", "echo 'written_from_container2' > /dev/shm/c2")
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Enter container 1 and check whether it can share container 2 shared files")
containerFile2, err := exutil.RemoteShPodWithBashSpecifyContainer(oc, podTwoContainers.namespace, podTwoContainers.name, "hello-openshift", "cat /dev/shm/c2")
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Container2 File Content is: %v", containerFile2)
o.Expect(containerFile2).To(o.Equal("written_from_container2"))
})
| ||||||
test case
|
openshift/openshift-tests-private
|
a6353444-3a34-469f-8c92-20216e4c72be
|
DEPRECATED-Author:minmli-High-47663-run pods in user namespaces via crio workload annotation
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("DEPRECATED-Author:minmli-High-47663-run pods in user namespaces via crio workload annotation", func() {
oc.SetupProject()
g.By("Test for case OCP-47663")
podUserNS47663.name = "userns-47663"
podUserNS47663.namespace = oc.Namespace()
g.By("Check workload of openshift-builder exist in crio config")
err := podUserNS47663.crioWorkloadConfigExist(oc)
exutil.AssertWaitPollNoErr(err, "crio workload config not exist")
g.By("Check user containers exist in /etc/sub[ug]id")
err = podUserNS47663.userContainersExistForNS(oc)
exutil.AssertWaitPollNoErr(err, "user containers not exist for user namespace")
g.By("Create a pod with annotation of openshift-builder workload")
podUserNS47663.createPodUserNS(oc)
defer podUserNS47663.deletePodUserNS(oc)
g.By("Check pod status")
err = podStatus(oc, podUserNS47663.namespace, podUserNS47663.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
g.By("Check pod run in user namespace")
err = podUserNS47663.podRunInUserNS(oc)
exutil.AssertWaitPollNoErr(err, "pod not run in user namespace")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
b6670288-f57b-41db-a798-c301de4a31ed
|
NonHyperShiftHOST-NonPreRelease-Longduration-Author:minmli-High-52328-set workload resource usage from pod level : pod should not take effect if not defaulted or specified in workload [Disruptive][Slow]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("NonHyperShiftHOST-NonPreRelease-Longduration-Author:minmli-High-52328-set workload resource usage from pod level : pod should not take effect if not defaulted or specified in workload [Disruptive][Slow]", func() {
oc.SetupProject()
exutil.By("Test for case OCP-52328")
exutil.By("Create a machine config for workload setting")
mcCpuOverride := filepath.Join(buildPruningBaseDir, "machineconfig-cpu-override-52328.yaml")
mcpName := "worker"
defer func() {
err := checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
}()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f=" + mcCpuOverride).Execute()
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + mcCpuOverride).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check mcp finish rolling out")
err = checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
exutil.By("Check workload setting is as expected")
wkloadConfig := []string{"crio.runtime.workloads.management", "activation_annotation = \"io.openshift.manager\"", "annotation_prefix = \"io.openshift.workload.manager\"", "crio.runtime.workloads.management.resources", "cpushares = 512"}
configPath := "/etc/crio/crio.conf.d/01-workload.conf"
err = configExist(oc, wkloadConfig, configPath)
exutil.AssertWaitPollNoErr(err, "workload setting is not set as expected")
exutil.By("Create a pod not specify cpuset in workload setting by annotation")
defer podWkloadCpu52328.delete(oc)
podWkloadCpu52328.name = "wkloadcpu-52328"
podWkloadCpu52328.namespace = oc.Namespace()
podWkloadCpu52328.workloadcpu = "{\"cpuset\": \"\", \"cpushares\": 1024}"
podWkloadCpu52328.create(oc)
exutil.By("Check pod status")
err = podStatus(oc, podWkloadCpu52328.namespace, podWkloadCpu52328.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
exutil.By("Check the pod only override cpushares")
cpuset := ""
err = overrideWkloadCpu(oc, cpuset, podWkloadCpu52328.namespace)
exutil.AssertWaitPollNoErr(err, "the pod did not override only cpushares in the workload setting")
})
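overrideWkloadCpu is a suite helper whose implementation is not shown here. A minimal standalone sketch of the same idea, assuming the oc CLI and a hypothetical namespace, is to read the CPU-related cgroup files visible inside the pod and confirm that only the share value changed:

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// cpuSettings is a rough, hypothetical check (not the suite's overrideWkloadCpu):
// it prints the CPU-related cgroup files visible inside the pod. On cgroup v1 the
// share is cpu/cpu.shares and the pinning is cpuset/cpuset.cpus; on cgroup v2 the
// equivalents are cpu.weight and cpuset.cpus.effective.
func cpuSettings(ns, pod string) (string, error) {
	script := `for f in /sys/fs/cgroup/cpu/cpu.shares /sys/fs/cgroup/cpuset/cpuset.cpus /sys/fs/cgroup/cpu.weight /sys/fs/cgroup/cpuset.cpus.effective; do [ -r "$f" ] && echo "$f=$(cat $f)"; done; true`
	out, err := exec.Command("oc", "exec", "-n", ns, pod, "--", "sh", "-c", script).CombinedOutput()
	return strings.TrimSpace(string(out)), err
}

func main() {
	fmt.Println(cpuSettings("my-namespace", "wkloadcpu-52328"))
}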
| |||||
test case
|
openshift/openshift-tests-private
|
bd0b2306-735b-462e-9f65-cc4255df495b
|
NonHyperShiftHOST-NonPreRelease-Longduration-Author:minmli-High-52313-High-52326-High-52329-set workload resource usage from pod level : pod can get configured to defaults and override defaults and pod should not be set if annotation not specified [Disruptive][Slow]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("NonHyperShiftHOST-NonPreRelease-Longduration-Author:minmli-High-52313-High-52326-High-52329-set workload resource usage from pod level : pod can get configured to defaults and override defaults and pod should not be set if annotation not specified [Disruptive][Slow]", func() {
oc.SetupProject()
exutil.By("Test for case OCP-52313, OCP-52326 and OCP-52329")
exutil.By("Create a machine config for workload setting")
mcCpuOverride := filepath.Join(buildPruningBaseDir, "machineconfig-cpu-override.yaml")
defer func() {
mcpName := "worker"
err := checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "machineconfigpool worker update failed")
}()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f=" + mcCpuOverride).Execute()
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + mcCpuOverride).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check mcp finish rolling out")
mcpName := "worker"
err = checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "machineconfigpool worker update failed")
exutil.By("Check workload setting is as expected")
wkloadConfig := []string{"crio.runtime.workloads.management", "activation_annotation = \"io.openshift.manager\"", "annotation_prefix = \"io.openshift.workload.manager\"", "crio.runtime.workloads.management.resources", "cpushares = 512", "cpuset = \"0\""}
configPath := "/etc/crio/crio.conf.d/01-workload.conf"
err = configExist(oc, wkloadConfig, configPath)
exutil.AssertWaitPollNoErr(err, "workload setting is not set as expected")
exutil.By("Create a pod with default workload setting by annotation")
podWkloadCpu52313.name = "wkloadcpu-52313"
podWkloadCpu52313.namespace = oc.Namespace()
podWkloadCpu52313.create(oc)
exutil.By("Check pod status")
err = podStatus(oc, podWkloadCpu52313.namespace, podWkloadCpu52313.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
exutil.By("Check the pod get configured to default workload setting")
cpuset := "0"
err = overrideWkloadCpu(oc, cpuset, podWkloadCpu52313.namespace)
exutil.AssertWaitPollNoErr(err, "the pod is not configured to default workload setting")
podWkloadCpu52313.delete(oc)
exutil.By("Create a pod override the default workload setting by annotation")
podWkloadCpu52326.name = "wkloadcpu-52326"
podWkloadCpu52326.namespace = oc.Namespace()
podWkloadCpu52326.workloadcpu = "{\"cpuset\": \"0-1\", \"cpushares\": 200}"
podWkloadCpu52326.create(oc)
exutil.By("Check pod status")
err = podStatus(oc, podWkloadCpu52326.namespace, podWkloadCpu52326.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
exutil.By("Check the pod override the default workload setting")
cpuset = "0-1"
err = overrideWkloadCpu(oc, cpuset, podWkloadCpu52326.namespace)
exutil.AssertWaitPollNoErr(err, "the pod not override the default workload setting")
podWkloadCpu52326.delete(oc)
exutil.By("Create a pod without annotation but with prefix")
defer podWkloadCpu52329.delete(oc)
podWkloadCpu52329.name = "wkloadcpu-52329"
podWkloadCpu52329.namespace = oc.Namespace()
podWkloadCpu52329.workloadcpu = "{\"cpuset\": \"0-1\", \"cpushares\": 1800}"
podWkloadCpu52329.create(oc)
exutil.By("Check pod status")
err = podStatus(oc, podWkloadCpu52329.namespace, podWkloadCpu52329.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
exutil.By("Check the pod keep default workload setting")
cpuset = "0-1"
err = defaultWkloadCpu(oc, cpuset, podWkloadCpu52329.namespace)
exutil.AssertWaitPollNoErr(err, "the pod does not keep the default workload setting")
})
| |||||
test case
|
openshift/openshift-tests-private
|
11cf6d84-467a-44d8-8944-45204b2c7229
|
NonHyperShiftHOST-NonPreRelease-Longduration-Author:minmli-LEVEL0-High-46313-set overlaySize in containerRuntimeConfig should take effect in container [Disruptive][Slow]
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("NonHyperShiftHOST-NonPreRelease-Longduration-Author:minmli-LEVEL0-High-46313-set overlaySize in containerRuntimeConfig should take effect in container [Disruptive][Slow]", func() {
oc.SetupProject()
g.By("Test for case OCP-46313")
ctrcfgOverlay.name = "ctrcfg-46313"
ctrcfgOverlay.overlay = "9G"
g.By("Create a containerRuntimeConfig to set overlaySize")
ctrcfgOverlay.create(oc)
defer func() {
g.By("Deleting containerRuntimeConfig")
cleanupObjectsClusterScope(oc, objectTableRefcscope{"ContainerRuntimeConfig", "ctrcfg-46313"})
g.By("Check mcp finish rolling out")
err := getmcpStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "mcp is not updated")
}()
g.By("Check mcp finish rolling out")
err := getmcpStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "mcp is not updated")
g.By("Check overlaySize take effect in config file")
err = checkOverlaySize(oc, ctrcfgOverlay.overlay)
exutil.AssertWaitPollNoErr(err, "overlaySize not take effect")
g.By("Create a pod")
podTermination.name = "pod-46313"
podTermination.namespace = oc.Namespace()
podTermination.create(oc)
defer podTermination.delete(oc)
g.By("Check pod status")
err = podStatus(oc, podTermination.namespace, podTermination.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
g.By("Check in pod the root partition size for Overlay is correct.")
err = checkPodOverlaySize(oc, ctrcfgOverlay.overlay)
exutil.AssertWaitPollNoErr(err, "pod overlay size is not correct !!!")
})
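checkPodOverlaySize is defined elsewhere in the suite. As a rough sketch of the idea — assuming the oc CLI, a df binary in the image, and a hypothetical namespace — the container root filesystem reported inside the pod should be capped at the configured overlaySize:

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// overlaySizeInPod is a hypothetical stand-in for the suite's checkPodOverlaySize:
// with overlaySize set to 9G through the ContainerRuntimeConfig, the container's
// root filesystem reported by df should be capped at roughly that size.
func overlaySizeInPod(ns, pod string) (string, error) {
	out, err := exec.Command("oc", "exec", "-n", ns, pod, "--", "df", "-h", "/").CombinedOutput()
	if err != nil {
		return "", fmt.Errorf("df failed: %v: %s", err, out)
	}
	lines := strings.Split(strings.TrimSpace(string(out)), "\n")
	fields := strings.Fields(lines[len(lines)-1]) // last line is the overlay mount
	if len(fields) < 2 {
		return "", fmt.Errorf("unexpected df output: %s", out)
	}
	return fields[1], nil // second column is the filesystem size
}

func main() {
	size, err := overlaySizeInPod("my-namespace", "pod-46313")
	fmt.Println(size, err) // expect something like 9.0G when overlaySize is 9G
}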
| ||||||
test case
|
openshift/openshift-tests-private
|
f84c3134-c295-4706-b5af-e7bb683dad4a
|
Author:minmli-High-56266-kubelet/crio will delete netns when a pod is deleted
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("Author:minmli-High-56266-kubelet/crio will delete netns when a pod is deleted", func() {
g.By("Test for case OCP-56266")
oc.SetupProject()
g.By("Create a pod")
podHello.name = "pod-56266"
podHello.namespace = oc.Namespace()
podHello.create(oc)
g.By("Check pod status")
err := podStatus(oc, podHello.namespace, podHello.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
g.By("Get Pod's Node name")
hostname := getPodNodeName(oc, podHello.namespace)
g.By("Get Pod's NetNS")
netNsPath, err := getPodNetNs(oc, hostname)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Delete the pod")
podHello.delete(oc)
g.By("Check the NetNs file was cleaned")
err = checkNetNs(oc, hostname, netNsPath)
exutil.AssertWaitPollNoErr(err, "the NetNs file is not cleaned !!!")
})
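getPodNetNs and checkNetNs are suite helpers not shown here. A loose standalone sketch of the cleanup check, assuming the oc CLI, a hypothetical node name and netns path, and that CRI-O keeps pod network namespaces under /var/run/netns on the node:

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// netnsGone is a rough standalone illustration, not the suite's checkNetNs:
// CRI-O keeps pod network namespaces bind-mounted under /var/run/netns on the
// node, so after the pod is deleted the recorded path should disappear.
// The node name and netns path below are placeholders.
func netnsGone(node, netnsPath string) (bool, error) {
	out, err := exec.Command("oc", "debug", "node/"+node, "--",
		"chroot", "/host", "ls", "/var/run/netns").CombinedOutput()
	if err != nil {
		return false, fmt.Errorf("listing netns: %v: %s", err, out)
	}
	name := strings.TrimPrefix(netnsPath, "/var/run/netns/")
	return !strings.Contains(string(out), name), nil
}

func main() {
	fmt.Println(netnsGone("worker-0.example.com", "/var/run/netns/8d9c6a3e"))
}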
| ||||||
test case
|
openshift/openshift-tests-private
|
a5b42906-5d72-4e94-b7ef-fd4f7daa2ef4
|
Author:minmli-High-55486-check not exist error MountVolume SetUp failed for volume serviceca object openshift-image-registry serviceca not registered
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("Author:minmli-High-55486-check not exist error MountVolume SetUp failed for volume serviceca object openshift-image-registry serviceca not registered", func() {
g.By("Test for case OCP-55486")
oc.SetupProject()
g.By("Check events of each cronjob")
err := checkEventsForErr(oc)
exutil.AssertWaitPollNoErr(err, "Found error: MountVolume.SetUp failed for volume ... not registered ")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
3721dfb3-f669-444f-be77-f612fd406fa7
|
Author:asahay-Medium-55033-check KUBELET_LOG_LEVEL is 2
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("Author:asahay-Medium-55033-check KUBELET_LOG_LEVEL is 2", func() {
g.By("Test for OCP-55033")
g.By("check Kubelet Log Level\n")
assertKubeletLogLevel(oc)
})
| ||||||
test case
|
openshift/openshift-tests-private
|
48fe8cf8-6c91-40bc-b7de-e26162332b38
|
Author:asahay-NonHyperShiftHOST-NonPreRelease-Longduration-LEVEL0-High-52472-update runtimeRequestTimeout parameter using KubeletConfig CR [Disruptive][Slow]
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("Author:asahay-NonHyperShiftHOST-NonPreRelease-Longduration-LEVEL0-High-52472-update runtimeRequestTimeout parameter using KubeletConfig CR [Disruptive][Slow]", func() {
oc.SetupProject()
runtimeTimeout.name = "kubeletconfig-52472"
runtimeTimeout.labelkey = "custom-kubelet"
runtimeTimeout.labelvalue = "test-timeout"
g.By("Label mcp worker custom-kubelet as test-timeout \n")
addLabelToResource(oc, "custom-kubelet=test-timeout", "worker", "mcp")
defer removeLabelFromNode(oc, "custom-kubelet-", "worker", "mcp")
g.By("Create KubeletConfig \n")
defer func() {
mcpName := "worker"
err := checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "machineconfigpool worker update failed")
}()
defer runtimeTimeout.delete(oc)
runtimeTimeout.create(oc)
g.By("Check mcp finish rolling out")
mcpName := "worker"
err := checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "machineconfigpool worker update failed")
g.By("Check Runtime Request Timeout")
runTimeTimeout(oc)
})
| ||||||
test case
|
openshift/openshift-tests-private
|
7310364e-d181-4f8a-9ccb-944d54a78760
|
Author:asahay-NonHyperShiftHOST-NonPreRelease-PreChkUpgrade-High-45436-Upgrading a cluster by making sure not keep duplicate machine config when it has multiple kubeletconfig [Disruptive][Slow]
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("Author:asahay-NonHyperShiftHOST-NonPreRelease-PreChkUpgrade-High-45436-Upgrading a cluster by making sure not keep duplicate machine config when it has multiple kubeletconfig [Disruptive][Slow]", func() {
upgradeMachineconfig1.name = "max-pod"
upgradeMachineconfig2.name = "max-pod-1"
g.By("Create first KubeletConfig \n")
upgradeMachineconfig1.create(oc)
g.By("Check mcp finish rolling out")
mcpName := "worker"
err := checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "machineconfigpool worker update failed")
g.By("Create second KubeletConfig \n")
upgradeMachineconfig2.create(oc)
g.By("Check mcp finish rolling out")
mcpName1 := "worker"
err1 := checkMachineConfigPoolStatus(oc, mcpName1)
exutil.AssertWaitPollNoErr(err1, "machineconfigpool worker update failed")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
c6d05f85-681e-409c-8f21-1edee4bbb7f7
|
Author:asahay-NonHyperShiftHOST-NonPreRelease-PstChkUpgrade-High-45436-post check Upgrading a cluster by making sure not keep duplicate machine config when it has multiple kubeletconfig [Disruptive][Slow]
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("Author:asahay-NonHyperShiftHOST-NonPreRelease-PstChkUpgrade-High-45436-post check Upgrading a cluster by making sure not keep duplicate machine config when it has multiple kubeletconfig [Disruptive][Slow]", func() {
upgradeMachineconfig1.name = "max-pod"
defer func() {
g.By("Delete the KubeletConfig")
cleanupObjectsClusterScope(oc, objectTableRefcscope{"KubeletConfig", upgradeMachineconfig1.name})
g.By("Check mcp finish rolling out")
err := checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "mcp is not updated")
}()
upgradeMachineconfig2.name = "max-pod-1"
defer func() {
g.By("Delete the KubeletConfig")
cleanupObjectsClusterScope(oc, objectTableRefcscope{"KubeletConfig", upgradeMachineconfig2.name})
g.By("Check mcp finish rolling out")
err := checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "mcp is not updated")
}()
g.By("Checking no duplicate machine config")
checkUpgradeMachineConfig(oc)
})
| ||||||
test case
|
openshift/openshift-tests-private
|
0b5d5493-e5d1-4f06-ba3e-70c493207dc1
|
NonHyperShiftHOST-NonPreRelease-PreChkUpgrade-Author:minmli-High-45351-prepare to check crioConfig[Disruptive][Slow]
|
['"context"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("NonHyperShiftHOST-NonPreRelease-PreChkUpgrade-Author:minmli-High-45351-prepare to check crioConfig[Disruptive][Slow]", func() {
rhelWorkers, err := exutil.GetAllWorkerNodesByOSID(oc, "rhel")
o.Expect(err).NotTo(o.HaveOccurred())
if len(rhelWorkers) > 0 {
g.Skip("ctrcfg.overlay can't be supported by rhel nodes")
}
if exutil.IsSNOCluster(oc) || exutil.Is3MasterNoDedicatedWorkerNode(oc) {
g.Skip("Skipped: Skip test for SNO/Compact clusters")
}
g.By("1) oc debug one worker and edit /etc/crio/crio.conf")
// we update log_level = "debug" in /etc/crio/crio.conf
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
nodename := nodeList.Items[0].Name
_, err = exutil.DebugNodeWithChroot(oc, nodename, "/bin/bash", "-c", "sed -i 's/log_level = \"info\"/log_level = \"debug\"/g' /etc/crio/crio.conf")
o.Expect(err).NotTo(o.HaveOccurred())
g.By("2) create a ContainerRuntimeConfig to set overlaySize")
ctrcfgOverlay.name = "ctrcfg-45351"
ctrcfgOverlay.overlay = "35G"
mcpName := "worker"
ctrcfgOverlay.create(oc)
g.By("3) check mcp finish rolling out")
err = checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "mcp update failed")
g.By("4) check overlaySize update as expected")
err = checkOverlaySize(oc, ctrcfgOverlay.overlay)
exutil.AssertWaitPollNoErr(err, "overlaySize not update as expected")
})
| |||||
test case
|
openshift/openshift-tests-private
|
ed070219-edcb-47d9-9b0a-42106995053f
|
NonHyperShiftHOST-NonPreRelease-PstChkUpgrade-Author:minmli-High-45351-post check crioConfig[Disruptive][Slow]
|
['"context"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("NonHyperShiftHOST-NonPreRelease-PstChkUpgrade-Author:minmli-High-45351-post check crioConfig[Disruptive][Slow]", func() {
rhelWorkers, err := exutil.GetAllWorkerNodesByOSID(oc, "rhel")
o.Expect(err).NotTo(o.HaveOccurred())
if len(rhelWorkers) > 0 {
g.Skip("ctrcfg.overlay can't be supported by rhel nodes")
}
if exutil.IsSNOCluster(oc) || exutil.Is3MasterNoDedicatedWorkerNode(oc) {
g.Skip("Skipped: Skip test for SNO/Compact clusters")
}
g.By("1) check overlaySize don't change after upgrade")
ctrcfgOverlay.name = "ctrcfg-45351"
ctrcfgOverlay.overlay = "35G"
defer func() {
g.By("Delete the containerRuntimeConfig")
cleanupObjectsClusterScope(oc, objectTableRefcscope{"ContainerRuntimeConfig", ctrcfgOverlay.name})
g.By("Check mcp finish rolling out")
err := checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "mcp is not updated")
}()
defer func() {
g.By("Restore /etc/crio/crio.conf")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
for _, node := range nodeList.Items {
nodename := node.Name
_, err = exutil.DebugNodeWithChroot(oc, nodename, "/bin/bash", "-c", "sed -i 's/log_level = \"debug\"/log_level = \"info\"/g' /etc/crio/crio.conf")
o.Expect(err).NotTo(o.HaveOccurred())
}
}()
err = checkOverlaySize(oc, ctrcfgOverlay.overlay)
exutil.AssertWaitPollNoErr(err, "overlaySize change after upgrade")
g.By("2) check conmon value from crio config")
//we need check every node for the conmon = ""
checkConmonForAllNode(oc)
})
| |||||
test case
|
openshift/openshift-tests-private
|
9e3fc031-2455-4a8e-8a8c-2e00e4857675
|
Author:asahay-Medium-57332-collecting the audit log with must gather
|
['"os/exec"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("Author:asahay-Medium-57332-collecting the audit log with must gather", func() {
defer exec.Command("bash", "-c", "rm -rf /tmp/must-gather-57332").Output()
g.By("Running the must gather command \n")
_, err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("must-gather", "--dest-dir=/tmp/must-gather-57332", "--", "/usr/bin/gather_audit_logs").Output()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("check the must-gather result")
_, err = exec.Command("bash", "-c", "ls -l /tmp/must-gather-57332").Output()
o.Expect(err).NotTo(o.HaveOccurred())
})
| |||||
test case
|
openshift/openshift-tests-private
|
eab74f24-acd3-40f0-941e-9f3793c11db0
|
Author:asahay-NonHyperShiftHOST-Longduration-NonPreRelease-High-44820-change container registry config [Serial][Slow]
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("Author:asahay-NonHyperShiftHOST-Longduration-NonPreRelease-High-44820-change container registry config [Serial][Slow]", func() {
ImgConfCont.name = "cluster"
expectedStatus1 := map[string]string{"Available": "True", "Progressing": "False", "Degraded": "False"}
exutil.By("Verifying Config Changes in Image Registry")
exutil.By("#. Copy and save existing CRD configuration in JSON format")
originImageConfigJSON, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("image.config", "cluster", "-o", "json").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
e2e.Logf("\n Original Image Configuration %v", originImageConfigJSON)
defer func() {
exutil.By("restore original ImageConfig")
createImageConfigWIthExportJSON(oc, originImageConfigJSON) // restore original yaml
exutil.By("Check mcp finish updating")
err := checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "Worker MCP is not updated")
err = checkMachineConfigPoolStatus(oc, "master")
exutil.AssertWaitPollNoErr(err, "Master MCP is not updated")
exutil.By("Check the openshift-apiserver operator status")
err = waitCoBecomes(oc, "openshift-apiserver", 480, expectedStatus1)
exutil.AssertWaitPollNoErr(err, "openshift-apiserver operator does not become available in 480 seconds")
exutil.By("Check the image-registry operator status")
err = waitCoBecomes(oc, "image-registry", 480, expectedStatus1)
exutil.AssertWaitPollNoErr(err, "image-registry operator does not become available in 480 seconds")
}()
checkImageConfigUpdatedAsExpected(oc)
})
| ||||||
test case
|
openshift/openshift-tests-private
|
09dd0eea-1489-4d54-9ca6-ed041ea7d39f
|
NonHyperShiftHOST-NonPreRelease-Longduration-Author:minmli-High-57401-Create ImageDigestMirrorSet successfully [Disruptive][Slow]
|
['"path/filepath"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("NonHyperShiftHOST-NonPreRelease-Longduration-Author:minmli-High-57401-Create ImageDigestMirrorSet successfully [Disruptive][Slow]", func() {
//If a cluster contains any ICSP or IDMS, it will skip the case
if checkICSP(oc) || checkIDMS(oc) {
g.Skip("This cluster contains ICSP or IDMS, skip the test.")
}
exutil.By("Create an ImageDigestMirrorSet")
idms := filepath.Join(buildPruningBaseDir, "ImageDigestMirrorSet.yaml")
defer func() {
err := checkMachineConfigPoolStatus(oc, "master")
exutil.AssertWaitPollNoErr(err, "machineconfigpool master update failed")
err = checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "machineconfigpool worker update failed")
}()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f=" + idms).Execute()
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + idms).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check the mcp finish updating")
err = checkMachineConfigPoolStatus(oc, "master")
exutil.AssertWaitPollNoErr(err, "machineconfigpool master update failed")
err = checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "machineconfigpool worker update failed")
exutil.By("Check the ImageDigestMirrorSet apply to config")
err = checkRegistryForIdms(oc)
exutil.AssertWaitPollNoErr(err, "check registry config failed")
exutil.By("The ImageContentSourcePolicy can't exist with ImageDigestMirrorSet or ImageTagMirrorSet")
icsp := filepath.Join(buildPruningBaseDir, "ImageContentSourcePolicy.yaml")
out, _ := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", icsp).Output()
o.Expect(strings.Contains(out, "Kind.ImageContentSourcePolicy: Forbidden: can't create ImageContentSourcePolicy when ImageDigestMirrorSet resources exist")).To(o.BeTrue())
})
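checkRegistryForIdms is implemented elsewhere in the suite. A minimal standalone sketch of the same check, assuming the oc CLI and a hypothetical node name (the mirror location passed in main is illustrative only), reads /etc/containers/registries.conf on a node and looks for the expected location entries:

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// registriesConfContains is a rough sketch of what a check like checkRegistryForIdms
// could do: read /etc/containers/registries.conf on one node and make sure every
// expected mirror location from the ImageDigestMirrorSet shows up.
func registriesConfContains(node string, wanted []string) error {
	out, err := exec.Command("oc", "debug", "node/"+node, "--",
		"chroot", "/host", "cat", "/etc/containers/registries.conf").CombinedOutput()
	if err != nil {
		return fmt.Errorf("reading registries.conf: %v: %s", err, out)
	}
	for _, w := range wanted {
		if !strings.Contains(string(out), w) {
			return fmt.Errorf("missing %q in registries.conf", w)
		}
	}
	return nil
}

func main() {
	// The node name and mirror location below are illustrative only.
	fmt.Println(registriesConfContains("worker-0.example.com",
		[]string{`location = "mirror.example.net"`}))
}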
| |||||
test case
|
openshift/openshift-tests-private
|
40b143cf-d70a-4f96-bb61-35f1ee28bf5c
|
NonHyperShiftHOST-Author:minmli-Medium-59552-Enable image signature verification for Red Hat Container Registries [Serial]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("NonHyperShiftHOST-Author:minmli-Medium-59552-Enable image signature verification for Red Hat Container Registries [Serial]", func() {
exutil.By("Check if mcp worker exist in current cluster")
machineCount, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", "worker", "-o=jsonpath={.status.machineCount}").Output()
if machineCount == "0" {
g.Skip("Skip for non-supported platform: mcp worker not exist!")
}
exutil.By("Apply a machine config to set image signature policy for worker nodes")
mcImgSig := filepath.Join(buildPruningBaseDir, "machineconfig-image-signature-59552.yaml")
mcpName := "worker"
defer func() {
err := checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "machineconfigpool worker update failed")
}()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f=" + mcImgSig).Execute()
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + mcImgSig).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check the mcp finish updating")
err = checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "machineconfigpool worker update failed")
exutil.By("Check the signature configuration policy.json")
err = checkImgSignature(oc)
exutil.AssertWaitPollNoErr(err, "check signature configuration failed")
})
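checkImgSignature is a suite helper; the sketch below is only an approximation of that check, assuming the oc CLI and a hypothetical node name: once the machine config rolls out, /etc/containers/policy.json on the node should carry signedBy requirements for the Red Hat registries.

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// policyRequiresSignature is a hypothetical illustration of the kind of check
// checkImgSignature performs: the node's /etc/containers/policy.json should
// require GPG-signed images ("signedBy") for the Red Hat registries once the
// machine config has rolled out. The node name is an assumption.
func policyRequiresSignature(node string) (bool, error) {
	out, err := exec.Command("oc", "debug", "node/"+node, "--",
		"chroot", "/host", "cat", "/etc/containers/policy.json").CombinedOutput()
	if err != nil {
		return false, fmt.Errorf("reading policy.json: %v: %s", err, out)
	}
	s := string(out)
	return strings.Contains(s, `"registry.redhat.io"`) && strings.Contains(s, `"signedBy"`), nil
}

func main() {
	fmt.Println(policyRequiresSignature("worker-0.example.com"))
}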
| |||||
test case
|
openshift/openshift-tests-private
|
3cdc1691-0a42-4b21-9d8b-339d3f0cf811
|
Author:asahay-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-62746-A default SYSTEM_RESERVED_ES value is applied if it is empty [Disruptive][Slow]
|
['"context"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("Author:asahay-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-62746-A default SYSTEM_RESERVED_ES value is applied if it is empty [Disruptive][Slow]", func() {
exutil.By("set SYSTEM_RESERVED_ES as empty")
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
nodename := nodeList.Items[0].Name
_, err = exutil.DebugNodeWithChroot(oc, nodename, "/bin/bash", "-c", "sed -i 's/SYSTEM_RESERVED_ES=1Gi/SYSTEM_RESERVED_ES=/g' /etc/crio/crio.conf")
o.Expect(err).NotTo(o.HaveOccurred())
systemReserveES.name = "kubeletconfig-62746"
systemReserveES.labelkey = "custom-kubelet"
systemReserveES.labelvalue = "reserve-space"
exutil.By("Label mcp worker custom-kubelet as reserve-space \n")
addLabelToResource(oc, "custom-kubelet=reserve-space", "worker", "mcp")
defer removeLabelFromNode(oc, "custom-kubelet-", "worker", "mcp")
exutil.By("Create KubeletConfig \n")
defer func() {
mcpName := "worker"
err := checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "machineconfigpool worker update failed")
}()
defer systemReserveES.delete(oc)
systemReserveES.create(oc)
exutil.By("Check mcp finish rolling out")
mcpName := "worker"
err = checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "machineconfigpool worker update failed")
exutil.By("Check Default value")
parameterCheck(oc)
})
| |||||
test case
|
openshift/openshift-tests-private
|
1d78d695-e183-4c75-a7a7-0740d4a58f41
|
NonHyperShiftHOST-NonPreRelease-Longduration-Author:minmli-High-65404-log link inside pod via crio works well [Disruptive]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("NonHyperShiftHOST-NonPreRelease-Longduration-Author:minmli-High-65404-log link inside pod via crio works well [Disruptive]", func() {
exutil.By("Apply a machine config to enable log link via crio")
mcLogLink := filepath.Join(buildPruningBaseDir, "machineconfig-log-link.yaml")
mcpName := "worker"
defer func() {
oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f=" + mcLogLink).Execute()
err := checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "machineconfigpool worker update failed")
}()
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + mcLogLink).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check the mcp finish updating")
err = checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "machineconfigpool worker update failed")
exutil.By("Check the crio config as expected")
logLinkConfig := []string{"crio.runtime.workloads.linked", "activation_annotation = \"io.kubernetes.cri-o.LinkLogs\"", "allowed_annotations = [ \"io.kubernetes.cri-o.LinkLogs\" ]"}
configPath := "/etc/crio/crio.conf.d/99-linked-log.conf"
err = configExist(oc, logLinkConfig, configPath)
exutil.AssertWaitPollNoErr(err, "crio config is not set as expected")
exutil.By("Create a pod with LinkLogs annotation")
podLogLink65404.name = "httpd"
podLogLink65404.namespace = oc.Namespace()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("ns", oc.Namespace(), "security.openshift.io/scc.podSecurityLabelSync=false", "pod-security.kubernetes.io/enforce=privileged", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer podLogLink65404.delete(oc)
podLogLink65404.create(oc)
exutil.By("Check pod status")
err = podStatus(oc, podLogLink65404.namespace, podLogLink65404.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
exutil.By("Check log link successfully")
checkLogLink(oc, podLogLink65404.namespace)
})
| |||||
test case
|
openshift/openshift-tests-private
|
a87b0fbf-e05f-438d-b956-69b34a841b3d
|
NonHyperShiftHOST-NonPreRelease-Longduration-Author:minmli-High-55683-Crun on OpenShift enable [Disruptive]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("NonHyperShiftHOST-NonPreRelease-Longduration-Author:minmli-High-55683-Crun on OpenShift enable [Disruptive]", func() {
exutil.By("Apply a ContainerRuntimeConfig to enable crun")
ctrcfgCrun := filepath.Join(buildPruningBaseDir, "containerRuntimeConfig-crun.yaml")
mcpName := "worker"
defer func() {
oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f=" + ctrcfgCrun).Execute()
err := checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "machineconfigpool worker update failed")
}()
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + ctrcfgCrun).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check the mcp finish updating")
err = checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "machineconfigpool worker update failed")
exutil.By("Check crun is running")
checkCrun(oc)
})
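checkCrun is defined elsewhere in the suite. One rough way to verify the same thing by hand, assuming the oc CLI and a hypothetical node name, is to ask crio config for the default runtime (the key names match the crio.conf snippets used in other cases above):

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// crunIsDefault is a rough stand-in for the suite's checkCrun: once the
// ContainerRuntimeConfig rolls out, `crio config` on the node should report
// crun as the default runtime. The node name is an assumption.
func crunIsDefault(node string) (bool, error) {
	out, err := exec.Command("oc", "debug", "node/"+node, "--",
		"chroot", "/host", "sh", "-c", "crio config 2>/dev/null | grep default_runtime").CombinedOutput()
	if err != nil {
		return false, fmt.Errorf("querying crio config: %v: %s", err, out)
	}
	return strings.Contains(string(out), `default_runtime = "crun"`), nil
}

func main() {
	fmt.Println(crunIsDefault("worker-0.example.com"))
}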
| |||||
test case
|
openshift/openshift-tests-private
|
a371cc25-0222-4ddf-a524-1d1f351a7cda
|
Author:minmli-DEPRECATED-High-68184-container_network metrics should keep reporting after container restart
|
['"fmt"', '"os/exec"', '"strconv"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("Author:minmli-DEPRECATED-High-68184-container_network metrics should keep reporting after container restart", func() {
livenessProbeTermP68184 := liveProbeTermPeriod{
name: "liveness-probe",
namespace: oc.Namespace(),
terminationgrace: 60,
probeterminationgrace: 10,
template: livenessProbeTemp,
}
exutil.By("Create a pod")
defer livenessProbeTermP68184.delete(oc)
livenessProbeTermP68184.create(oc)
exutil.By("Check pod status")
err := podStatus(oc, livenessProbeTermP68184.namespace, livenessProbeTermP68184.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
exutil.By("Check the container_network* metrics report well")
podNode, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", livenessProbeTermP68184.name, "-o=jsonpath={.spec.nodeName}", "-n", livenessProbeTermP68184.namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("podNode is :%v", podNode)
var cmdOut1 string
var cmdOut2 string
waitErr := wait.Poll(10*time.Second, 70*time.Second, func() (bool, error) {
cmd1 := fmt.Sprintf(`oc get --raw /api/v1/nodes/%v/proxy/metrics/cadvisor | grep container_network_transmit | grep %v || true`, podNode, livenessProbeTermP68184.name)
cmdOut1Bytes, err := exec.Command("bash", "-c", cmd1).Output()
o.Expect(err).NotTo(o.HaveOccurred())
cmdOut1 = string(cmdOut1Bytes)
if strings.Contains(cmdOut1, "container_network_transmit_bytes_total") && strings.Contains(cmdOut1, "container_network_transmit_errors_total") && strings.Contains(cmdOut1, "container_network_transmit_packets_dropped_total") && strings.Contains(cmdOut1, "container_network_transmit_packets_total") {
e2e.Logf("\ncontainer_network* metrics report well after pod start")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(waitErr, fmt.Sprintf("check metrics failed after pod start! Metric result is: \n %v \n", cmdOut1))
exutil.By("Check the container_network* metrics still report after container restart")
waitErr = wait.Poll(80*time.Second, 5*time.Minute, func() (bool, error) {
restartCount, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", livenessProbeTermP68184.name, "-o=jsonpath={.status.containerStatuses[0].restartCount}", "-n", livenessProbeTermP68184.namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("restartCount is :%v", restartCount)
o.Expect(strconv.Atoi(restartCount)).Should(o.BeNumerically(">=", 1), "error: the pod restart time < 1")
cmd2 := fmt.Sprintf(`oc get --raw /api/v1/nodes/%v/proxy/metrics/cadvisor | grep container_network_transmit | grep %v || true`, podNode, livenessProbeTermP68184.name)
cmdOut2Bytes, err := exec.Command("bash", "-c", cmd2).Output()
o.Expect(err).NotTo(o.HaveOccurred())
cmdOut2 = string(cmdOut2Bytes)
if strings.Contains(cmdOut2, "container_network_transmit_bytes_total") && strings.Contains(cmdOut2, "container_network_transmit_errors_total") && strings.Contains(cmdOut2, "container_network_transmit_packets_dropped_total") && strings.Contains(cmdOut2, "container_network_transmit_packets_total") {
e2e.Logf("\ncontainer_network* metrics report well after pod restart")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(waitErr, fmt.Sprintf("check metrics failed after pod restart! Metric result is: \n %v \n", cmdOut2))
})
| |||||
test case
|
openshift/openshift-tests-private
|
84184317-703d-4a17-9c7c-1a525cba544e
|
NonHyperShiftHOST-NonPreRelease-Longduration-Author:minmli-Medium-66398-Enable WASM workloads in OCP
|
['"os/exec"', '"path/filepath"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("NonHyperShiftHOST-NonPreRelease-Longduration-Author:minmli-Medium-66398-Enable WASM workloads in OCP", func() {
podWASM66398 := podWASM{
name: "wasm-http",
namespace: oc.Namespace(),
template: podWASMTemp,
}
exutil.By("Apply a machineconfig to configure crun-wasm as the default runtime")
mcWASM := filepath.Join(buildPruningBaseDir, "machineconfig-wasm.yaml")
mcpName := "worker"
defer func() {
oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f=" + mcWASM).Execute()
err := checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "machineconfigpool worker update failed")
}()
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + mcWASM).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check the machine config pool finish updating")
err = checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "machineconfigpool worker update failed")
exutil.By("Verify the crun-wasm is configured as expected")
wasmConfig := []string{"crio.runtime", "default_runtime = \"crun-wasm\"", "crio.runtime.runtimes.crun-wasm", "runtime_path = \"/usr/bin/crun\"", "crio.runtime.runtimes.crun-wasm.platform_runtime_paths", "\"wasi/wasm32\" = \"/usr/bin/crun-wasm\""}
configPath := "/etc/crio/crio.conf.d/99-crun-wasm.conf"
err = configExist(oc, wasmConfig, configPath)
exutil.AssertWaitPollNoErr(err, "crun-wasm is not set as expected")
exutil.By("Check if wasm bits are enabled appropriately")
exutil.By("1)label namespace pod-security.kubernetes.io/enforce=baseline")
addLabelToResource(oc, "pod-security.kubernetes.io/enforce=baseline", oc.Namespace(), "namespace")
exutil.By("2)Create a pod")
defer podWASM66398.delete(oc)
podWASM66398.create(oc)
exutil.By("3)Check pod status")
err = podStatus(oc, podWASM66398.namespace, podWASM66398.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
exutil.By("4)Expose the pod as a service")
_, err = oc.AsAdmin().WithoutNamespace().Run("expose").Args("pod", podWASM66398.name, "-n", podWASM66398.namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("5)Expose the service as a route")
_, err = oc.AsAdmin().WithoutNamespace().Run("expose").Args("service", podWASM66398.name, "-n", podWASM66398.namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("6)Get the route name")
routeName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("route", podWASM66398.name, "-ojsonpath={.spec.host}", "-n", podWASM66398.namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("7)Curl the route name")
out, err := exec.Command("bash", "-c", "curl "+routeName+" -d \"Hello world!\"").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(string(out), "echo: Hello world!")).Should(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
6b31301c-665a-452c-833f-194d634ce4d7
|
NonHyperShiftHOST-NonPreRelease-Author:jfrancoa-Medium-67564-node's drain should block when PodDisruptionBudget minAvailable equals 100 percentage and selector is empty [Disruptive]
|
['"fmt"', '"strings"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("NonHyperShiftHOST-NonPreRelease-Author:jfrancoa-Medium-67564-node's drain should block when PodDisruptionBudget minAvailable equals 100 percentage and selector is empty [Disruptive]", func() {
exutil.By("Create a deployment with 6 replicas")
deploy := NewDeployment("hello-openshift", oc.Namespace(), "6", genericDeploymentTemp)
defer deploy.delete(oc)
deploy.create(oc)
deploy.waitForCreation(oc, 5)
exutil.By("Create PodDisruptionBudget")
pdb := NewPDB("my-pdb", oc.Namespace(), "100%", podDisruptionBudgetTemp)
defer pdb.delete(oc)
pdb.create(oc)
worker := getSingleWorkerNode(oc)
exutil.By(fmt.Sprintf("Obtain the pods running on node %v", worker))
podsInWorker, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("pods", "-n", oc.Namespace(), "-o=jsonpath={.items[?(@.spec.nodeName=='"+worker+"')].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(strings.Split(podsInWorker, " "))).Should(o.BeNumerically(">", 0))
// if the pdb's status is false and reason InsufficientPods
// means that it's not possible to drain a node keeping the
// required minimum availability, therefore the drain operation
// should block.
exutil.By("Make sure that PDB's status is False")
pdbStatus, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("poddisruptionbudget", "my-pdb", "-n", oc.Namespace(), "-o=jsonpath={.status.conditions[0].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(pdbStatus, "False")).Should(o.BeTrue())
exutil.By(fmt.Sprintf("Drain the node %v", worker))
defer waitClusterOperatorAvailable(oc)
defer oc.WithoutNamespace().AsAdmin().Run("adm").Args("uncordon", worker).Execute()
// Try to drain the node (it should fail) due to the 100%'s PDB minAvailability
// as the draining is impossible to happen, if we don't pass a timeout value this
// command will wait forever, as default timeout is 0s, which means infinite.
out, err := oc.WithoutNamespace().AsAdmin().Run("adm").Args("drain", worker, "--ignore-daemonsets", "--delete-emptydir-data", "--timeout=30s").Output()
o.Expect(err).To(o.HaveOccurred(), "Drain operation should have been blocked but it wasn't")
o.Expect(strings.Contains(out, "Cannot evict pod as it would violate the pod's disruption budget")).Should(o.BeTrue())
o.Expect(strings.Contains(out, "There are pending nodes to be drained")).Should(o.BeTrue())
exutil.By("Verify that the pods were not drained from the node")
podsAfterDrain, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("pods", "-n", oc.Namespace(), "-o=jsonpath={.items[?(@.spec.nodeName=='"+worker+"')].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(podsInWorker).Should(o.BeIdenticalTo(podsAfterDrain))
})
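The reason the drain blocks is that eviction is only permitted while the PDB reports disruptionsAllowed > 0, and a minAvailable of 100% with an empty selector pins that number at 0. A small standalone sketch of that check, assuming the oc CLI and the namespace/PDB names used in this test:

package main

import (
	"fmt"
	"os/exec"
	"strconv"
	"strings"
)

// drainWouldBlock is a small illustration of why the drain above gets stuck:
// eviction is only allowed while the PDB reports disruptionsAllowed > 0, and a
// minAvailable of 100% with an empty selector pins that number at 0.
func drainWouldBlock(ns, pdb string) (bool, error) {
	out, err := exec.Command("oc", "get", "pdb", pdb, "-n", ns,
		"-o", "jsonpath={.status.disruptionsAllowed}").CombinedOutput()
	if err != nil {
		return false, fmt.Errorf("reading pdb status: %v: %s", err, out)
	}
	allowed, err := strconv.Atoi(strings.TrimSpace(string(out)))
	if err != nil {
		return false, err
	}
	return allowed == 0, nil
}

func main() {
	fmt.Println(drainWouldBlock("my-namespace", "my-pdb"))
}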
| |||||
test case
|
openshift/openshift-tests-private
|
dc255efd-b723-44f2-8d80-75f6189285ab
|
NonHyperShiftHOST-NonPreRelease-Longduration-Author:minmli-High-70203-ICSP and IDMS/ITMS can coexist in cluster[Disruptive][Slow]
|
['"path/filepath"', '"strings"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("NonHyperShiftHOST-NonPreRelease-Longduration-Author:minmli-High-70203-ICSP and IDMS/ITMS can coexist in cluster[Disruptive][Slow]", func() {
exutil.By("Check if any ICSP/IDMS/ITMS exist in the cluster")
//If a cluster contains any ICSP or IDMS or ITMS, it will skip the case
if checkICSPorIDMSorITMS(oc) {
g.Skip("This cluster contains ICSP or IDMS or ITMS, skip the test.")
}
exutil.By("1)Create an ICSP")
icsp := filepath.Join(buildPruningBaseDir, "ImageContentSourcePolicy-1.yaml")
defer func() {
err := checkMachineConfigPoolStatus(oc, "master")
exutil.AssertWaitPollNoErr(err, "machineconfigpool master update failed")
err = checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "machineconfigpool worker update failed")
}()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f=" + icsp).Execute()
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + icsp).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("2)Check the mcp finish updating")
err = checkMachineConfigPoolStatus(oc, "master")
exutil.AssertWaitPollNoErr(err, "machineconfigpool master update failed")
err = checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "machineconfigpool worker update failed")
exutil.By("3)Check the config file /etc/containers/registries.conf update as expected")
registryConfig := []string{"location = \"registry.access.redhat.com/ubi8/ubi-minimal\"", "location = \"example.io/example/ubi-minimal\"", "location = \"example.com/example/ubi-minimal\"", "location = \"registry.example.com/example\"", "location = \"mirror.example.net\""}
configPath := "/etc/containers/registries.conf"
err = configExist(oc, registryConfig, configPath)
exutil.AssertWaitPollNoErr(err, "registry config is not set as expected")
/*
//After OCPBUGS-27190 is fixed, will uncomment the code block
exutil.By("4)Create an IDMS with the same registry/mirror config as ICSP but with conflicting policy")
idms := filepath.Join(buildPruningBaseDir, "ImageDigestMirrorSet-conflict.yaml")
out, _ := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", idms).Output()
o.Expect(strings.Contains(out, "XXXXX")).To(o.BeTrue())
*/
exutil.By("5)Create an IDMS with the same registry/mirror config as ICSP")
idms1 := filepath.Join(buildPruningBaseDir, "ImageDigestMirrorSet-1.yaml")
defer func() {
err := checkMachineConfigPoolStatus(oc, "master")
exutil.AssertWaitPollNoErr(err, "machineconfigpool master update failed")
err = checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "machineconfigpool worker update failed")
}()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f=" + idms1).Execute()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + idms1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("6)Check the mcp doesn't get updated after idms created")
o.Consistently(func() bool {
worker_updated, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", "worker", "-n", oc.Namespace(), "-o=jsonpath={.status.conditions[?(@.type==\"Updated\")].status}").Output()
worker_updating, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", "worker", "-n", oc.Namespace(), "-o=jsonpath={.status.conditions[?(@.type==\"Updating\")].status}").Output()
master_updated, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", "master", "-n", oc.Namespace(), "-o=jsonpath={.status.conditions[?(@.type==\"Updated\")].status}").Output()
master_updating, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", "master", "-n", oc.Namespace(), "-o=jsonpath={.status.conditions[?(@.type==\"Updating\")].status}").Output()
return worker_updated == "True" && worker_updating == "False" && master_updated == "True" && master_updating == "False"
}).WithTimeout(60 * time.Second).WithPolling(5 * time.Second).Should(o.BeTrue())
exutil.By("7)Delete the ICSP")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f=" + icsp).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("8)Check the mcp doesn't get updated after icsp deleted")
o.Consistently(func() bool {
worker_updated, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", "worker", "-n", oc.Namespace(), "-o=jsonpath={.status.conditions[?(@.type==\"Updated\")].status}").Output()
worker_updating, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", "worker", "-n", oc.Namespace(), "-o=jsonpath={.status.conditions[?(@.type==\"Updating\")].status}").Output()
master_updated, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", "master", "-n", oc.Namespace(), "-o=jsonpath={.status.conditions[?(@.type==\"Updated\")].status}").Output()
master_updating, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", "master", "-n", oc.Namespace(), "-o=jsonpath={.status.conditions[?(@.type==\"Updating\")].status}").Output()
return worker_updated == "True" && worker_updating == "False" && master_updated == "True" && master_updating == "False"
}).WithTimeout(60 * time.Second).WithPolling(5 * time.Second).Should(o.BeTrue())
exutil.By("9)Check the config file /etc/containers/registries.conf keep the same")
registryConfig = []string{"location = \"registry.access.redhat.com/ubi8/ubi-minimal\"", "location = \"example.io/example/ubi-minimal\"", "location = \"example.com/example/ubi-minimal\"", "location = \"registry.example.com/example\"", "location = \"mirror.example.net\""}
configPath = "/etc/containers/registries.conf"
err = configExist(oc, registryConfig, configPath)
exutil.AssertWaitPollNoErr(err, "registry config is not set as expected")
exutil.By("10)Create an ITMS with different registry/mirror config from IDMS")
itms := filepath.Join(buildPruningBaseDir, "ImageTagMirrorSet.yaml")
defer func() {
err := checkMachineConfigPoolStatus(oc, "master")
exutil.AssertWaitPollNoErr(err, "machineconfigpool master update failed")
err = checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "machineconfigpool worker update failed")
}()
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f=" + itms).Execute()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + itms).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("11)Check mcp finish updating")
err = checkMachineConfigPoolStatus(oc, "master")
exutil.AssertWaitPollNoErr(err, "machineconfigpool master update failed")
err = checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "machineconfigpool worker update failed")
exutil.By("12)Check the config file /etc/containers/registries.conf update as expected")
registryConfig = []string{"location = \"registry.access.redhat.com/ubi9/ubi-minimal\"", "location = \"registry.redhat.io\"", "location = \"mirror.example.com\""}
configPath = "/etc/containers/registries.conf"
err = configExist(oc, registryConfig, configPath)
exutil.AssertWaitPollNoErr(err, "registry config is not set as expected")
})
| |||||
test case
|
openshift/openshift-tests-private
|
0c742b45-4d89-496b-ba45-1c7630998974
|
NonHyperShiftHOST-NonPreRelease-Longduration-Author:minmli-High-41897-Restricting CPUs for infra and application containers[Disruptive][Slow]
|
['"path/filepath"', '"strconv"', '"strings"', 'g "github.com/onsi/ginkgo/v2"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("NonHyperShiftHOST-NonPreRelease-Longduration-Author:minmli-High-41897-Restricting CPUs for infra and application containers[Disruptive][Slow]", func() {
exutil.By("Check cpu core num on the node")
workerNodes := getWorkersList(oc)
cpu_num := getCpuNum(oc, workerNodes[0])
//This case can only run on a node with more than 4 cpu cores
if cpu_num <= 4 {
g.Skip("This cluster has less than 4 cpu cores, skip the test.")
}
exutil.By("Test for case OCP-41897")
cpuPerformanceprofile := filepath.Join(buildPruningBaseDir, "cpu-performanceprofile.yaml")
perfProfile41897 := cpuPerfProfile{
name: "performance-41897",
isolated: "",
template: cpuPerformanceprofile,
}
isolatedCpu := "0,5-" + strconv.Itoa(cpu_num-1)
perfProfile41897.isolated = isolatedCpu
exutil.By("1)Create a performanceProfile")
//when delete the performanceprofile, only mcp worker will update
defer func() {
err := checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "machineconfigpool worker update failed")
}()
defer perfProfile41897.delete(oc)
perfProfile41897.create(oc)
//for 4.14+, master and worker pool need update to change cgroup from v2 to v1, then worker pool update to apply performanceprofile
exutil.By("2)Check the mcp finish updating")
//if cgroup is v2, then mcp master and worker need update to change to v1 first
cgroupV := getCgroupVersion(oc)
if cgroupV == "cgroup2fs" {
err := checkMachineConfigPoolStatus(oc, "master")
exutil.AssertWaitPollNoErr(err, "machineconfigpool master update failed")
err = checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "machineconfigpool worker update failed")
}
// the kubelet get generated when the mcp worker update to apply performanceprofile
exutil.By("3)Check the kubeletconfig get generated")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("kubeletconfig", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(output, perfProfile41897.name)).Should(o.BeTrue())
e2e.Logf("kubeletconfig exist: [%v], then check the mcp worker finish updating\n", output)
err = checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "machineconfigpool worker update failed")
exutil.By("4)Check the reserved cpu are as expected")
// 1) "reservedSystemCPUs": "1-4" from /etc/kubernetes/kubelet.conf
// 2) sh-5.1# pgrep systemd |while read i; do taskset -cp $i; done || results: pid 1's current affinity list: 1-4
//isolatedCpu := "0,5-" + strconv.Itoa(cpu_num-1)
reservedCpu := "1-4"
checkReservedCpu(oc, reservedCpu)
})
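checkReservedCpu is a suite helper; following the two checks listed in the comments above, a standalone sketch (assuming the oc CLI, a hypothetical node name, and that kubelet.conf formats the field exactly as quoted in the comment) could look like this:

package main

import (
	"fmt"
	"os/exec"
)

// reservedCPUsApplied is a standalone sketch in the spirit of checkReservedCpu,
// following the two checks noted in the test comments: the kubelet picked up
// reservedSystemCPUs, and PID 1 on the node is pinned to the reserved set.
// The kubelet.conf formatting and the node name are assumptions.
func reservedCPUsApplied(node, reserved string) (bool, error) {
	script := fmt.Sprintf(`grep -q '"reservedSystemCPUs": "%s"' /etc/kubernetes/kubelet.conf && taskset -cp 1 | grep -q '%s'`, reserved, reserved)
	out, err := exec.Command("oc", "debug", "node/"+node, "--",
		"chroot", "/host", "sh", "-c", script).CombinedOutput()
	if err != nil {
		// A non-zero exit just means one of the two greps did not match.
		return false, fmt.Errorf("%s", out)
	}
	return true, nil
}

func main() {
	fmt.Println(reservedCPUsApplied("worker-0.example.com", "1-4"))
}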
| |||||
test case
|
openshift/openshift-tests-private
|
9e6c08ef-77b6-41c8-9a3f-cd9f356bd831
|
NonHyperShiftHOST-NonPreRelease-Longduration-Author:minmli-High-62985-Support disable cpu load balancing and cpu quota on RHEL 9 [Disruptive][Slow]
|
['"path/filepath"', '"strconv"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("NonHyperShiftHOST-NonPreRelease-Longduration-Author:minmli-High-62985-Support disable cpu load balancing and cpu quota on RHEL 9 [Disruptive][Slow]", func() {
// in 4.16, it support cgroupv2; in 4.15-, it only support cgroupv1
exutil.By("Check cpu core num on the node")
workerNodes := getWorkersList(oc)
cpu_num := getCpuNum(oc, workerNodes[0])
//This case can only run on a node with more than 4 cpu cores
if cpu_num <= 4 {
g.Skip("This cluster has less than 4 cpu cores, skip the test.")
}
cpuPerformanceprofile := filepath.Join(buildPruningBaseDir, "cpu-performanceprofile.yaml")
perfProfile62985 := cpuPerfProfile{
name: "performance-62985",
isolated: "",
template: cpuPerformanceprofile,
}
isolatedCpu := "0,5-" + strconv.Itoa(cpu_num-1)
perfProfile62985.isolated = isolatedCpu
exutil.By("1)Create a performanceProfile")
defer func() {
perfProfile62985.delete(oc)
err := checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "machineconfigpool worker update failed")
}()
perfProfile62985.create(oc)
err := checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "machineconfigpool worker update failed")
exutil.By("2)Check the reserved cpu are as expected")
// 1) "reservedSystemCPUs": "1-4" from /etc/kubernetes/kubelet.conf
// 2) sh-5.1# pgrep systemd |while read i; do taskset -cp $i; done || results: pid 1's current affinity list: 1-4
reservedCpu := "1-4"
checkReservedCpu(oc, reservedCpu)
exutil.By("3)Turn on cpu info in dmesg log")
defer dmesgTurnOnCpu(oc, "1")
dmesgTurnOnCpu(oc, "0")
exutil.By("4)Create a pod with Guaranteed QoS, using at least a full CPU and load balance/cpu-quota disable annotation")
podCpuLoadBalance62985 := podCpuLoadBalance{
name: "cpu-load-balce-62985",
namespace: oc.Namespace(),
runtimeclass: "performance-performance-62985", //"performance-" + perfProfile62985.name
template: podCpuLoadBalanceTemp,
}
defer podCpuLoadBalance62985.delete(oc)
podCpuLoadBalance62985.create(oc)
exutil.By("5)Check pod Status")
err = podStatus(oc, podCpuLoadBalance62985.namespace, podCpuLoadBalance62985.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
exutil.By("6)Check the cpus are properly having load balance disabled")
checkCpuLoadBalanceDisabled(oc, podCpuLoadBalance62985.namespace, podCpuLoadBalance62985.name)
exutil.By("7)Check cpu-quota is disabled from container scope and pod cgroup correctly")
cgroupV := getCgroupVersion(oc)
checkCpuQuotaDisabled(oc, podCpuLoadBalance62985.namespace, podCpuLoadBalance62985.name, cgroupV)
})
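checkCpuQuotaDisabled is implemented elsewhere; a rough standalone sketch of the container-scope part of that check, assuming the oc CLI and that the pod image ships a shell: with the quota disabled through the pod's annotation, the cgroup shows an unlimited quota ("max ..." on cgroup v2, "-1" in cpu.cfs_quota_us on v1).

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// cpuQuotaDisabled is a rough illustration of the idea behind checkCpuQuotaDisabled:
// read the quota file visible inside the pod and expect it to be unlimited.
// The namespace is an assumption; the pod name matches the one created above.
func cpuQuotaDisabled(ns, pod string) (bool, error) {
	script := `cat /sys/fs/cgroup/cpu.max 2>/dev/null || cat /sys/fs/cgroup/cpu/cpu.cfs_quota_us`
	out, err := exec.Command("oc", "exec", "-n", ns, pod, "--", "sh", "-c", script).CombinedOutput()
	if err != nil {
		return false, fmt.Errorf("reading cpu quota: %v: %s", err, out)
	}
	v := strings.TrimSpace(string(out))
	return strings.HasPrefix(v, "max") || v == "-1", nil
}

func main() {
	fmt.Println(cpuQuotaDisabled("my-namespace", "cpu-load-balce-62985"))
}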
| |||||
test case
|
openshift/openshift-tests-private
|
e467ac5d-36ce-4284-a1f7-dd19a41c63f9
|
Author:minmli-NonHyperShiftHOST-NonPreRelease-Longduration-High-73667-High-73412-Crio verify the sigstore signature using default policy when pulling images [Disruptive][Slow]
|
['"path/filepath"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("Author:minmli-NonHyperShiftHOST-NonPreRelease-Longduration-High-73667-High-73412-Crio verify the sigstore signature using default policy when pulling images [Disruptive][Slow]", func() {
exutil.By("1)Enable featureGate of TechPreviewNoUpgrade")
exutil.By("Check if exist any featureSet in featuregate cluster")
featureSet, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("featuregate", "cluster", "-o=jsonpath={.spec.featureSet}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("featureSet is: %s", featureSet)
if featureSet == "TechPreviewNoUpgrade" {
e2e.Logf("featureSet is TechPreviewNoUpgrade already, no need setting again!")
/*
//comment the part of [featureSet == ""] to observe the execution of tp profile in CI
} else if featureSet == "" {
_, err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("featuregate", "cluster", "-p", "{\"spec\": {\"featureSet\": \"TechPreviewNoUpgrade\"}}", "--type=merge").Output()
if err != nil {
e2e.Failf("Fail to enable TechPreviewNoUpgrade, error:%v", err)
}
exutil.By("check mcp master and worker finish updating")
err = checkMachineConfigPoolStatus(oc, "master")
exutil.AssertWaitPollNoErr(err, "machineconfigpool master update failed")
err = checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "machineconfigpool worker update failed")
} else {
g.Skip("featureSet is neither empty nor TechPreviewNoUpgrade,skip it!")
}
*/
} else {
g.Skip("featureSet is not TechPreviewNoUpgrade,skip it!")
}
exutil.By("2)Check the featureGate take effect")
//featureConfig := []string{"SignatureStores: true", "SigstoreImageVerification: true"} //4.17 be so
featureConfig := []string{"\"SignatureStores\": true", "\"SigstoreImageVerification\": true"} //4.16 be so
kubeletPath := "/etc/kubernetes/kubelet.conf"
err = configExist(oc, featureConfig, kubeletPath)
exutil.AssertWaitPollNoErr(err, "featureGate config check failed")
exutil.By("3)Set the crio loglevel [debug]")
ctrcfgLog := filepath.Join(buildPruningBaseDir, "containerRuntimeConfig_log_level.yaml")
mcpName := "worker"
defer func() {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f=" + ctrcfgLog).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, "machineconfigpool worker update failed")
}()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + ctrcfgLog).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check mcp finish updating")
err = checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "machineconfigpool worker update failed")
exutil.By("Check the crio loglevel")
nodeName := getSingleWorkerNode(oc)
out, _ := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", "crio config | grep log_level")
o.Expect(strings.Contains(string(out), "log_level = \"debug\"")).Should(o.BeTrue())
exutil.By("4)Apply the ClusterImagePolicy manifest")
imgPolicy := filepath.Join(buildPruningBaseDir, "imagePolicy.yaml")
defer func() {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f=" + imgPolicy).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = checkMachineConfigPoolStatus(oc, "master")
exutil.AssertWaitPollNoErr(err, "machineconfigpool master update failed")
err = checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "machineconfigpool worker update failed")
}()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + imgPolicy).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check mcp finish updating")
err = checkMachineConfigPoolStatus(oc, "master")
exutil.AssertWaitPollNoErr(err, "machineconfigpool master update failed")
err = checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "machineconfigpool worker update failed")
exutil.By("5)Create a pod with an image containing sigstore signature")
podSigstore73667.name = "pod-73667-sig"
podSigstore73667.namespace = oc.Namespace()
defer podSigstore73667.delete(oc)
podSigstore73667.create(oc)
exutil.By("6)Check the pod status")
err = podStatus(oc, podSigstore73667.namespace, podSigstore73667.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
exutil.By("7)check the crio log about sigstore signature verification")
docker_ns := "docker.io"
image := "docker.io/lyman9966/rhel8"
checkSigstoreVerified(oc, podSigstore73667.namespace, podSigstore73667.name, image, docker_ns)
exutil.By("8)validate pulling an image not containing sigstore signature will fail")
nodeName = getSingleWorkerNode(oc)
out, _ = exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c", "crictl pull docker.io/ocpqe/hello-pod:latest")
o.Expect(strings.Contains(string(out), "Source image rejected: A signature was required, but no signature exists")).Should(o.BeTrue())
})
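checkSigstoreVerified is a suite helper whose exact log matching is not shown here. As a loose sketch only — the grep pattern is a guess, since the wording of the log lines belongs to CRI-O — one could scan the crio journal on the node that pulled the image, assuming the oc CLI and a hypothetical node name:

package main

import (
	"fmt"
	"os/exec"
)

// crioSigstoreLogs is a loose sketch of the evidence checkSigstoreVerified gathers:
// with crio at debug log level, the journal on the node that pulled the image
// records the signature lookup. The grep pattern is only a guess at CRI-O's
// wording, and the node name is an assumption.
func crioSigstoreLogs(node, image string) (string, error) {
	script := fmt.Sprintf(`journalctl -u crio --since "30 min ago" | grep -i sigstore | grep '%s' || true`, image)
	out, err := exec.Command("oc", "debug", "node/"+node, "--",
		"chroot", "/host", "sh", "-c", script).CombinedOutput()
	return string(out), err
}

func main() {
	fmt.Println(crioSigstoreLogs("worker-0.example.com", "docker.io/lyman9966/rhel8"))
}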
| |||||
test case
|
openshift/openshift-tests-private
|
7f5bd6f8-d761-44d4-80d7-9e0b0f58aa7f
|
Author:minmli-NonHyperShiftHOST-NonPreRelease-Longduration-Critical-72080-Verify cpu affinity of container process matches with cpuset cgroup controller interface file cpuset.cpus [Disruptive][Slow]
|
['"fmt"', '"path/filepath"', '"time"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("Author:minmli-NonHyperShiftHOST-NonPreRelease-Longduration-Critical-72080-Verify cpu affinity of container process matches with cpuset cgroup controller interface file cpuset.cpus [Disruptive][Slow]", func() {
//this case verify 3 scenarios:
//1)Verify burstable pods affinity contains all online cpus
//2)when guaranteed pods are created (with integral cpus), the affinity of burstable pods is modified accordingly to remove any cpus used by the guaranteed pod
//3)After node reboot, burstable pods' affinity should contain all cpus excluding those used by guaranteed pods
exutil.By("1)Label a specific worker node")
workerNodes := getWorkersList(oc)
var worker string
for i := 0; i < len(workerNodes); i++ {
readyStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", workerNodes[i], "-o=jsonpath={.status.conditions[?(@.reason=='KubeletReady')].status}").Output()
if readyStatus == "True" {
worker = workerNodes[i]
break
}
}
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("nodes", worker, "node-role.kubernetes.io/worker-affinity-tests-").Output()
_, err := oc.AsAdmin().WithoutNamespace().Run("label").Args("nodes", worker, "node-role.kubernetes.io/worker-affinity-tests=", "--overwrite").Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("2)Create a machine config pool for the specific worker")
mcpAffinity := filepath.Join(buildPruningBaseDir, "machineconfigpool-affinity.yaml")
defer func() {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f=" + mcpAffinity).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
}()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + mcpAffinity).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("2.1)Check the mcp finish updating")
mcpName := "worker-affinity-tests"
err = checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("macineconfigpool %v update failed!", mcpName))
//the mcp worker also need updating after mcp worker-affinity-tests finish updating
err = checkMachineConfigPoolStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("macineconfigpool worker update failed!"))
exutil.By("3)Create a kubeletconfig to enable cpumanager")
kubeletConfigCpumanager := filepath.Join(buildPruningBaseDir, "kubeletconfig-cpumanager.yaml")
defer func() {
oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f=" + kubeletConfigCpumanager).Execute()
err := checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("machineconfigpool %v update failed!", mcpName))
}()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + kubeletConfigCpumanager).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("3.1)Check the mcp finish updating")
err = checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("macineconfigpool %v update failed!", mcpName))
exutil.By("4)Check one running burstable pod that its cpu affinity include all online cpus")
//select one pod of ns openshift-cluster-node-tuning-operator which is running on the $worker node
burstPodName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", "openshift-cluster-node-tuning-operator", "--field-selector=spec.nodeName="+worker, "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
coreNum := getCpuNum(oc, worker)
burstNs := "openshift-cluster-node-tuning-operator"
checkCpuAffinityBurst(oc, burstPodName, burstNs, worker, coreNum, "false")
exutil.By("5)Create a guranteed pod with integral cpus")
podGuTemp := filepath.Join(buildPruningBaseDir, "pod-guaranteed.yaml")
podGu72080 := podGuDescription{
name: "gurantee-72080",
namespace: oc.Namespace(),
nodename: worker,
template: podGuTemp,
}
defer podGu72080.delete(oc)
podGu72080.create(oc)
exutil.By("5.1)Check the pod status")
err = podStatus(oc, podGu72080.namespace, podGu72080.name)
exutil.AssertWaitPollNoErr(err, "pod is not running")
exutil.By("5.2)Get cpu affinity of the guranteed pod")
gu_affinity := getCpuAffinityFromPod(oc, podGu72080.namespace, podGu72080.name)
exutil.By("6)Check the cpu affinity of burstable pod changed after creating the guranteed pod")
checkCpuAffinityBurst(oc, burstPodName, burstNs, worker, coreNum, gu_affinity)
exutil.By("7)Delete the guranteed pod")
podGu72080.delete(oc)
exutil.By("8)Check the cpu affinity of burstable pod revert after deleting the guranteed pod")
// there exist a bug currently, when deleting the pod, the cpu affinity of burstable pod can't revert in a short time
//checkCpuAffinityBurst(oc, burstPodName, burstNs, worker, coreNum, "false")
exutil.By("9)Create a deployment with guranteed pod with integral cpus")
deployGuTemp := filepath.Join(buildPruningBaseDir, "guaranteed-deployment.yaml")
deploy := NewDeploymentWithNode("guarantee-72080", oc.Namespace(), "1", worker, deployGuTemp)
defer deploy.delete(oc)
deploy.create(oc)
deploy.waitForCreation(oc, 5)
exutil.By("9.1)Get cpu affinity of the guranteed pod owned by the deployment")
guPodName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", oc.Namespace(), "--field-selector", "spec.nodeName="+worker, "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
gu_affinity = getCpuAffinityFromPod(oc, oc.Namespace(), guPodName)
exutil.By("10)Check the cpu affinity of burstable pod changed after creating the deployment")
checkCpuAffinityBurst(oc, burstPodName, burstNs, worker, coreNum, gu_affinity)
exutil.By("11)Reboot the node")
defer checkNodeStatus(oc, worker, "Ready")
rebootNode(oc, worker)
checkNodeStatus(oc, worker, "NotReady")
checkNodeStatus(oc, worker, "Ready")
exutil.By("12)Check the cpu affinity of burstable pod contain all cpus excluding the cpus used by guranteed pods")
deploy.waitForCreation(oc, 5)
guPodName, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", oc.Namespace(), "--field-selector", "spec.nodeName="+worker, "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
gu_affinity = getCpuAffinityFromPod(oc, oc.Namespace(), guPodName)
burstPodName, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", "openshift-cluster-node-tuning-operator", "--field-selector=spec.nodeName="+worker, "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
checkCpuAffinityBurst(oc, burstPodName, burstNs, worker, coreNum, gu_affinity)
})
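// Hedged sketch (not the repo's getCpuAffinityFromPod/checkCpuAffinityBurst helpers): the
// comparison this case relies on is "CPU affinity of the container process" versus the
// cpuset.cpus file of its cgroup. The crictl/taskset commands are real, but the helper
// name and the cgroup v2 path handling below are illustrative assumptions. Note that the
// debug-pod output may include extra lines; the real helpers filter them.
func cpuAffinityVsCpusetSketch(oc *exutil.CLI, nodeName, containerID string) (affinity, cpuset string) {
	// PID of the container's main process as reported by crio.
	pidOut, _ := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c",
		"crictl inspect -o go-template --template '{{.info.pid}}' "+containerID)
	pid := strings.TrimSpace(pidOut)
	// Affinity of that process, e.g. "0-3" or "0,2-5".
	affinityOut, _ := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c",
		"taskset -pc "+pid+" | awk -F': ' '{print $2}'")
	// cpuset.cpus of the cgroup that process belongs to (cgroup v2 layout assumed).
	cpusetOut, _ := exutil.DebugNodeWithChroot(oc, nodeName, "/bin/bash", "-c",
		"cat /sys/fs/cgroup$(awk -F: '{print $3}' /proc/"+pid+"/cgroup)/cpuset.cpus")
	return strings.TrimSpace(affinityOut), strings.TrimSpace(cpusetOut)
}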
| |||||
test case
|
openshift/openshift-tests-private
|
3c14e1ac-720a-47ad-9f6b-d0ad024ad4de
|
Author:asahay-High-78394-Make CRUN as Default Runtime for 4.18
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("Author:asahay-High-78394-Make CRUN as Default Runtime for 4.18", func() {
exutil.By("1) Check Cluster Version")
clusterVersion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Cluster version: %s\n", clusterVersion)
var expectedRuntime string
if strings.Contains(clusterVersion, "4.18") {
expectedRuntime = "crun"
} else {
expectedRuntime = "runc"
}
exutil.By("2) Check all Nodes are Up and Default Runtime is crun")
defaultRuntimeCheck(oc, expectedRuntime)
})
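// Hedged sketch (not the repo's defaultRuntimeCheck): the assertion above can be made by
// reading the default OCI runtime back from "crio config" on every worker node. The
// function name is an assumption; getWorkersList and DebugNodeWithChroot are the same
// helpers this file already uses.
func defaultRuntimeCheckSketch(oc *exutil.CLI, expectedRuntime string) {
	for _, node := range getWorkersList(oc) {
		out, err := exutil.DebugNodeWithChroot(oc, node, "/bin/bash", "-c",
			"crio config 2>/dev/null | grep default_runtime")
		o.Expect(err).NotTo(o.HaveOccurred())
		// Expect a line such as: default_runtime = "crun"
		o.Expect(out).To(o.ContainSubstring("default_runtime = \"" + expectedRuntime + "\""))
	}
}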
| |||||
test case
|
openshift/openshift-tests-private
|
2a3bada6-32e1-4a7e-8538-857bb9a24729
|
Author:asahay-NonPreRelease-Longduration-High-78610-Default Runtime can be Updated to runc in 4.18[Serial]
|
['"path/filepath"', 'e2enode "k8s.io/kubernetes/test/e2e/framework/node"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("Author:asahay-NonPreRelease-Longduration-High-78610-Default Runtime can be Updated to runc in 4.18[Serial]", func() {
exutil.By("1) Check Cluster Version")
clusterVersion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Cluster version: %s\n", clusterVersion)
exutil.By("2.1) Apply ContainerRuntimeConfig install manifest on Worker node to request defaultRuntime to runc ")
ContainerRuntimeConfigTemp1 := filepath.Join(buildPruningBaseDir, "ContainerRuntimeConfigWorker-78610.yaml")
defer func() {
err := oc.AsAdmin().Run("delete").Args("-f=" + ContainerRuntimeConfigTemp1).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
mcpname1 := "worker"
err = checkMachineConfigPoolStatus(oc, mcpname1)
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
}()
err1 := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f=" + ContainerRuntimeConfigTemp1).Execute()
o.Expect(err1).NotTo(o.HaveOccurred())
exutil.By("2.2) Apply ContainerRuntimeConfig install manifest on Master node to request defaultRuntime to runc ")
ContainerRuntimeConfigTemp2 := filepath.Join(buildPruningBaseDir, "ContainerRuntimeConfigMaster-78610.yaml")
defer func() {
err := oc.AsAdmin().Run("delete").Args("-f=" + ContainerRuntimeConfigTemp2).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
mcpname2 := "master"
err = checkMachineConfigPoolStatus(oc, mcpname2)
exutil.AssertWaitPollNoErr(err, "macineconfigpool worker update failed")
}()
err2 := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f=" + ContainerRuntimeConfigTemp2).Execute()
o.Expect(err2).NotTo(o.HaveOccurred())
exutil.By("3) Wait for MCP to Finish Update")
exutil.By("Check mcp finish rolling out")
oc.NotShowInfo()
mcpName1 := "worker"
mcpName2 := "master"
err3 := checkMachineConfigPoolStatus(oc, mcpName1)
exutil.AssertWaitPollNoErr(err3, "machineconfigpool worker update failed")
err4 := checkMachineConfigPoolStatus(oc, mcpName2)
exutil.AssertWaitPollNoErr(err4, "machineconfigpool master update failed")
//for checking machine config pool
mcp, err5 := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp").Output()
o.Expect(err5).NotTo(o.HaveOccurred())
e2e.Logf("\n Machine config pools are:\n %s", mcp)
exutil.By("4) Check the Default Runtime Value")
UpdatedRuntimeCheck(oc, "runc")
})
| |||||
test case
|
openshift/openshift-tests-private
|
aa59b8cd-03a5-4062-a4cd-087824eb0d73
|
Author:weinliu-LEVEL0-StagerunBoth-High-52383-Keda Install
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("Author:weinliu-LEVEL0-StagerunBoth-High-52383-Keda Install", func() {
g.By("CMA (Keda) operator has been installed successfully")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
8af2dcf0-8f10-4b10-808b-b75024f10f9c
|
Author:weinliu-StagerunBoth-High-62570-Verify must-gather tool works with CMA
|
['"os"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("Author:weinliu-StagerunBoth-High-62570-Verify must-gather tool works with CMA", func() {
var (
mustgatherName = "mustgather" + getRandomString()
mustgatherDir = "/tmp/" + mustgatherName
mustgatherLog = mustgatherName + ".log"
logFile string
)
g.By("Get the mustGatherImage")
mustGatherImage, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("packagemanifest", "-n=openshift-marketplace", "openshift-custom-metrics-autoscaler-operator", "-o=jsonpath={.status.channels[?(.name=='stable')].currentCSVDesc.annotations.containerImage}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Running the must gather command \n")
defer os.RemoveAll(mustgatherDir)
logFile, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("must-gather", "--dest-dir="+mustgatherDir, "--image="+mustGatherImage).Output()
if err != nil {
e2e.Logf("mustgather created from image %v in %v logged to %v,%v %v", mustGatherImage, mustgatherDir, mustgatherLog, logFile, err)
o.Expect(err).NotTo(o.HaveOccurred())
}
})
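// Hedged sketch: a cheap follow-up assertion for the must-gather run above is that the
// destination directory is non-empty. This is illustrative only (the function name is an
// assumption) and uses the standard library rather than any repo helper.
func mustGatherDirNotEmptySketch(dir string) bool {
	entries, err := os.ReadDir(dir)
	if err != nil {
		e2e.Logf("cannot read must-gather dir %s: %v", dir, err)
		return false
	}
	return len(entries) > 0
}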
| |||||
test case
|
openshift/openshift-tests-private
|
b72c03a2-b0cf-4a67-a788-c6bee04b1e49
|
Author:weinliu-High-60961-Audit logging test - stdout Metadata[Serial]
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("Author:weinliu-High-60961-Audit logging test - stdout Metadata[Serial]", func() {
g.By("Create KedaController with log level Metadata")
g.By("Create CMA Keda Controller ")
cmaKedaController := cmaKedaControllerDescription{
level: "Metadata",
template: cmaKedaControllerTemplate,
name: "keda",
namespace: "openshift-keda",
}
defer cmaKedaController.delete(oc)
cmaKedaController.create(oc)
metricsApiserverPodName := getPodNameByLabel(oc, "openshift-keda", "app=keda-metrics-apiserver")
waitPodReady(oc, "openshift-keda", "app=keda-metrics-apiserver")
g.By("Check the Audit Logged as configed")
log, err := exutil.GetSpecificPodLogs(oc, "openshift-keda", "", metricsApiserverPodName[0], "")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(log, "\"level\":\"Metadata\"")).Should(o.BeTrue())
})
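// Hedged sketch: the three stdout audit cases (Metadata, Request, RequestResponse) differ
// only in the level string expected in the keda-metrics-apiserver log, so they could share
// a helper like this one. The function name is an assumption; the helpers it calls are the
// same ones used in the case above.
func assertAuditLevelLoggedSketch(oc *exutil.CLI, level string) {
	podNames := getPodNameByLabel(oc, "openshift-keda", "app=keda-metrics-apiserver")
	waitPodReady(oc, "openshift-keda", "app=keda-metrics-apiserver")
	log, err := exutil.GetSpecificPodLogs(oc, "openshift-keda", "", podNames[0], "")
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(strings.Contains(log, "\"level\":\""+level+"\"")).Should(o.BeTrue())
}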
| |||||
test case
|
openshift/openshift-tests-private
|
ed097078-edf0-40ae-97bf-c17a2687d8d4
|
Author:asahay-High-60962-Audit logging test - stdout Request[Serial]
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("Author:asahay-High-60962-Audit logging test - stdout Request[Serial]", func() {
g.By("Create KedaController with log level Request")
g.By("Create CMA Keda Controller ")
cmaKedaController := cmaKedaControllerDescription{
level: "Request",
template: cmaKedaControllerTemplate,
name: "keda",
namespace: "openshift-keda",
}
defer cmaKedaController.delete(oc)
cmaKedaController.create(oc)
metricsApiserverPodName := getPodNameByLabel(oc, "openshift-keda", "app=keda-metrics-apiserver")
waitPodReady(oc, "openshift-keda", "app=keda-metrics-apiserver")
g.By("Check the Audit Logged as configed")
log, err := exutil.GetSpecificPodLogs(oc, "openshift-keda", "", metricsApiserverPodName[0], "")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(log, "\"level\":\"Request\"")).Should(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
a4ab79af-c9b8-4d78-9764-e4d0ce540f11
|
Author:asahay-High-60963-Audit logging test - stdout RequestResponse[Serial]
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("Author:asahay-High-60963-Audit logging test - stdout RequestResponse[Serial]", func() {
g.By("Create KedaController with log level RequestResponse")
g.By("Create CMA Keda Controller ")
cmaKedaController := cmaKedaControllerDescription{
level: "RequestResponse",
template: cmaKedaControllerTemplate,
name: "keda",
namespace: "openshift-keda",
}
defer cmaKedaController.delete(oc)
cmaKedaController.create(oc)
metricsApiserverPodName := getPodNameByLabel(oc, "openshift-keda", "app=keda-metrics-apiserver")
waitPodReady(oc, "openshift-keda", "app=keda-metrics-apiserver")
g.By("Check the Audit Logged as configed")
log, err := exutil.GetSpecificPodLogs(oc, "openshift-keda", "", metricsApiserverPodName[0], "")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(log, "\"level\":\"RequestResponse\"")).Should(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
dbacfb08-0761-43c7-a795-03a18a3aff77
|
Author:asahay-High-60964-Audit logging test - Writing to PVC [Serial]
|
['"fmt"', '"path/filepath"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("Author:asahay-High-60964-Audit logging test - Writing to PVC [Serial]", func() {
exutil.By("1) Create a PVC")
pvc := filepath.Join(buildPruningBaseDir, "pvc-60964.yaml")
defer oc.AsAdmin().Run("delete").Args("-f="+pvc, "-n", "openshift-keda").Execute()
err := oc.AsAdmin().Run("create").Args("-f="+pvc, "-n", "openshift-keda").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("2) Create KedaController with log level Metdata")
exutil.By("Create CMA Keda Controller ")
pvcKedaControllerTemp := filepath.Join(buildPruningBaseDir, "pvcKedaControllerTemp-60964.yaml")
pvcKedaController := pvcKedaControllerDescription{
level: "Metadata",
template: pvcKedaControllerTemp,
name: "keda",
namespace: "openshift-keda",
watchNamespace: "openshift-keda",
}
defer pvcKedaController.delete(oc)
pvcKedaController.create(oc)
metricsApiserverPodName := getPodNameByLabel(oc, "openshift-keda", "app=keda-metrics-apiserver")
waitPodReady(oc, "openshift-keda", "app=keda-metrics-apiserver")
var output string
exutil.By("3) Checking PVC creation")
output, err = oc.AsAdmin().Run("get").Args("pvc", "-n", "openshift-keda").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("PVC is %v", output)
exutil.By("4) Checking KEDA Controller")
errCheck := wait.Poll(10*time.Second, 180*time.Second, func() (bool, error) {
output, err := oc.AsAdmin().Run("get").Args("KedaController", "-n", "openshift-keda").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(output, "keda") {
e2e.Logf("Keda Controller has been created Successfully!")
return true, nil
}
return false, nil
})
e2e.Logf("Output is %s", output)
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("KedaController has not been created"))
exutil.By("5) Checking status of pods")
waitPodReady(oc, "openshift-keda", "app=keda-metrics-apiserver")
exutil.By("6) Verifying audit logs for 'Metadata'")
errCheck = wait.Poll(10*time.Second, 180*time.Second, func() (bool, error) {
auditOutput := ExecCommandOnPod(oc, metricsApiserverPodName[0], "openshift-keda", "tail $(ls -t /var/audit-policy/log*/log-out-pvc | head -1)")
if strings.Contains(auditOutput, "Metadata") {
e2e.Logf("Audit log contains 'Metadata ")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Audit Log does not contain Metadata"))
})
| |||||
test case
|
openshift/openshift-tests-private
|
65f1fd12-e063-45ff-a567-7c10a5418636
|
Author:weinliu-Critical-52384-Automatically scaling pods based on Kafka Metrics[Serial][Slow]
|
['"path/filepath"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("Author:weinliu-Critical-52384-Automatically scaling pods based on Kafka Metrics[Serial][Slow]", func() {
var (
scaledObjectStatus string
)
exutil.By("Create a kedacontroller with default template")
kedaControllerDefault := filepath.Join(buildPruningBaseDir, "keda-controller-default.yaml")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", "openshift-keda", "KedaController", "keda").Execute()
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + kedaControllerDefault).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
kafaksNs := "kafka-52384"
defer deleteProject(oc, kafaksNs)
createProject(oc, kafaksNs)
//Create Kafka
exutil.By("Subscribe to AMQ operator")
defer removeAmqOperator(oc)
createAmqOperator(oc)
exutil.By("Test for case OCP-52384")
exutil.By(" 1) Create a Kafka instance")
kafka := filepath.Join(buildPruningBaseDir, "kafka-52384.yaml")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f=" + kafka).Execute()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + kafka).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("2) Create a Kafka topic")
kafkaTopic := filepath.Join(buildPruningBaseDir, "kafka-topic-52384.yaml")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f=" + kafkaTopic).Execute()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + kafkaTopic).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("3) Check if Kafka and Kafka topic are ready")
// Wait for Kafka and KafkaTopic to be ready
waitForKafkaReady(oc, "my-cluster", kafaksNs)
namespace := oc.Namespace()
exutil.By("4) Create a Kafka Consumer")
kafkaConsumerDeployment := filepath.Join(buildPruningBaseDir, "kafka-consumer-deployment-52384.yaml")
defer oc.AsAdmin().Run("delete").Args("-f="+kafkaConsumerDeployment, "-n", namespace).Execute()
err = oc.AsAdmin().Run("create").Args("-f="+kafkaConsumerDeployment, "-n", namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("5) Create a scaledobjectc")
kafkaScaledobject := filepath.Join(buildPruningBaseDir, "kafka-scaledobject-52384.yaml")
defer oc.AsAdmin().Run("delete").Args("-f="+kafkaScaledobject, "-n", namespace).Execute()
err = oc.AsAdmin().Run("create").Args("-f="+kafkaScaledobject, "-n", namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("5.1) Check ScaledObject is up")
err = wait.Poll(3*time.Second, 300*time.Second, func() (bool, error) {
scaledObjectStatus, _ = oc.AsAdmin().Run("get").Args("ScaledObject", "kafka-amqstreams-consumer-scaledobject", "-o=jsonpath={.status.health.s0-kafka-my-topic.status}", "-n", namespace).Output()
if scaledObjectStatus == "Happy" {
e2e.Logf("ScaledObject is up and working")
return true, nil
}
e2e.Logf("ScaledObject is not in working status, current status: %v", scaledObjectStatus)
return false, nil
})
exutil.AssertWaitPollNoErr(err, "scaling failed")
exutil.By("Kafka scaling is up and ready")
exutil.By("6)Create a Kafka load")
kafkaLoad := filepath.Join(buildPruningBaseDir, "kafka-load-52384.yaml")
defer oc.AsAdmin().Run("delete").Args("jobs", "--field-selector", "status.successful=1", "-n", namespace).Execute()
err = oc.AsAdmin().Run("create").Args("-f="+kafkaLoad, "-n", namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("6.1) Check ScaledObject is up")
err = wait.Poll(3*time.Second, 300*time.Second, func() (bool, error) {
scaledObjectStatus, _ = oc.AsAdmin().Run("get").Args("ScaledObject", "kafka-amqstreams-consumer-scaledobject", "-o=jsonpath={.status.health.s0-kafka-my-topic.status}", "-n", namespace).Output()
if scaledObjectStatus == "Happy" {
e2e.Logf("ScaledObject is up and working")
return true, nil
}
e2e.Logf("ScaledObject is not in working status, current status: %v", scaledObjectStatus)
return false, nil
})
exutil.AssertWaitPollNoErr(err, "scaling failed")
exutil.By("Kafka scaling is up and ready")
})
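// Hedged sketch (not the repo's waitForKafkaReady): readiness of an AMQ Streams/strimzi
// Kafka CR can be polled through its Ready condition. The jsonpath assumes the standard
// status.conditions layout published by the operator; the function name is an assumption.
func waitForKafkaReadySketch(oc *exutil.CLI, name, namespace string) {
	err := wait.Poll(10*time.Second, 600*time.Second, func() (bool, error) {
		status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("kafka", name, "-n", namespace,
			"-o=jsonpath={.status.conditions[?(@.type=='Ready')].status}").Output()
		if err != nil || status != "True" {
			e2e.Logf("kafka %s/%s not ready yet (status=%q, err=%v)", namespace, name, status, err)
			return false, nil
		}
		return true, nil
	})
	exutil.AssertWaitPollNoErr(err, "kafka cluster never became ready")
}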
| |||||
test case
|
openshift/openshift-tests-private
|
b1c8edd4-4a7f-4210-ab88-5b54c3cd6e9e
|
Author:weinliu-ConnectedOnly-Critical-52385-Automatically scaling pods based on Prometheus metrics[Serial]
|
['"fmt"', '"os"', '"path/filepath"', '"regexp"', '"strings"', '"time"', '"github.com/tidwall/sjson"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("Author:weinliu-ConnectedOnly-Critical-52385-Automatically scaling pods based on Prometheus metrics[Serial]", func() {
exutil.By("Create a kedacontroller with default template")
kedaControllerDefault := filepath.Join(buildPruningBaseDir, "keda-controller-default.yaml")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", "openshift-keda", "KedaController", "keda").Execute()
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + kedaControllerDefault).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
var scaledObjectStatus string
triggerAuthenticationTempl := filepath.Join(buildPruningBaseDir, "triggerauthentication-52385.yaml")
triggerAuthentication52385 := triggerAuthenticationDescription{
secretname: "",
namespace: "",
template: triggerAuthenticationTempl,
}
cmaNs := "cma-52385"
defer deleteProject(oc, cmaNs)
createProject(oc, cmaNs)
exutil.By("1) Create OpenShift monitoring for user-defined projects")
// Look for cluster-level monitoring configuration
getOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ConfigMap", "cluster-monitoring-config", "-n", "openshift-monitoring", "--ignore-not-found").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// Enable user workload monitoring
if len(getOutput) > 0 {
exutil.By("ConfigMap cluster-monitoring-config exists, extracting cluster-monitoring-config ...")
extractOutput, _, _ := oc.AsAdmin().WithoutNamespace().Run("extract").Args("ConfigMap/cluster-monitoring-config", "-n", "openshift-monitoring", "--to=-").Outputs()
//if strings.Contains(strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(extractOutput, "'", ""), "\"", ""), " ", ""), "enableUserWorkload:true") {
cleanedOutput := strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(extractOutput, "'", ""), "\"", ""), " ", "")
e2e.Logf("cleanedOutput is %s", cleanedOutput)
if matched, _ := regexp.MatchString("enableUserWorkload:\\s*true", cleanedOutput); matched {
exutil.By("User workload is enabled, doing nothing ... ")
} else {
exutil.By("User workload is not enabled, enabling ...")
exutil.By("Get current monitoring configuration to recover")
originclusterMonitoringConfig, getContentError := oc.AsAdmin().Run("get").Args("ConfigMap/cluster-monitoring-config", "-ojson", "-n", "openshift-monitoring").Output()
o.Expect(getContentError).NotTo(o.HaveOccurred())
originclusterMonitoringConfig, getContentError = sjson.Delete(originclusterMonitoringConfig, `metadata.resourceVersion`)
o.Expect(getContentError).NotTo(o.HaveOccurred())
originclusterMonitoringConfig, getContentError = sjson.Delete(originclusterMonitoringConfig, `metadata.uid`)
o.Expect(getContentError).NotTo(o.HaveOccurred())
originclusterMonitoringConfigFilePath := filepath.Join(e2e.TestContext.OutputDir, oc.Namespace()+"-52385.json")
o.Expect(os.WriteFile(originclusterMonitoringConfigFilePath, []byte(originclusterMonitoringConfig), 0644)).NotTo(o.HaveOccurred())
defer func() {
errReplace := oc.AsAdmin().WithoutNamespace().Run("replace").Args("-f", originclusterMonitoringConfigFilePath).Execute()
o.Expect(errReplace).NotTo(o.HaveOccurred())
}()
exutil.By("Deleting current monitoring configuration")
oc.WithoutNamespace().AsAdmin().Run("delete").Args("ConfigMap/cluster-monitoring-config", "-n", "openshift-monitoring").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create my monitoring configuration")
prometheusConfigmap := filepath.Join(buildPruningBaseDir, "prometheus-configmap.yaml")
_, err = oc.WithoutNamespace().AsAdmin().Run("create").Args("-f=" + prometheusConfigmap).Output()
o.Expect(err).NotTo(o.HaveOccurred())
}
} else {
e2e.Logf("ConfigMap cluster-monitoring-config does not exist, creating ...")
prometheusConfigmap := filepath.Join(buildPruningBaseDir, "prometheus-configmap.yaml")
defer func() {
errDelete := oc.WithoutNamespace().AsAdmin().Run("delete").Args("-f=" + prometheusConfigmap).Execute()
o.Expect(errDelete).NotTo(o.HaveOccurred())
}()
_, err = oc.WithoutNamespace().AsAdmin().Run("create").Args("-f=" + prometheusConfigmap).Output()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("2) Deploy application that exposes Prometheus metrics")
prometheusComsumer := filepath.Join(buildPruningBaseDir, "prometheus-comsumer-deployment.yaml")
defer oc.AsAdmin().Run("delete").Args("-f="+prometheusComsumer, "-n", cmaNs).Execute()
err = oc.AsAdmin().Run("create").Args("-f="+prometheusComsumer, "-n", cmaNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("2.1) Verify the deployment is available")
errCheck := wait.Poll(20*time.Second, 280*time.Second, func() (bool, error) {
output, err1 := oc.AsAdmin().Run("get").Args("deployment", "-n", cmaNs).Output()
o.Expect(err1).NotTo(o.HaveOccurred())
if strings.Contains(output, "test-app") && strings.Contains(output, "1/1") {
e2e.Logf("Deployment has been created Sucessfully!")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Depolyment has not been created"))
exutil.By("3) Create a Service Account")
defer oc.WithoutNamespace().AsAdmin().Run("delete").Args("sa", "thanos-52385", "-n", cmaNs).Execute()
err = oc.WithoutNamespace().AsAdmin().Run("create").Args("sa", "thanos-52385", "-n", cmaNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("3.1) Create Service Account Token")
servicetokenTemp := filepath.Join(buildPruningBaseDir, "servicetoken-52385.yaml")
token, err := oc.AsAdmin().SetNamespace(cmaNs).Run("apply").Args("-f", servicetokenTemp).Output()
e2e.Logf("err %v, token %v", err, token)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("3.2) Make sure the token is available")
serviceToken, err := oc.AsAdmin().Run("get").Args("secret", "thanos-token", "-n", cmaNs).Output()
e2e.Logf("err %v, token %v", err, serviceToken)
o.Expect(err).NotTo(o.HaveOccurred())
saTokenName := "thanos-token"
exutil.By("4) Define TriggerAuthentication with the Service Account's token")
triggerAuthentication52385.secretname = string(saTokenName[:])
triggerAuthentication52385.namespace = cmaNs
defer oc.AsAdmin().Run("delete").Args("-n", cmaNs, "TriggerAuthentication", "keda-trigger-auth-prometheus").Execute()
triggerAuthentication52385.create(oc)
exutil.By("4.1) Check TriggerAuthentication is Available")
triggerauth, err := oc.AsAdmin().Run("get").Args("TriggerAuthentication", "-n", cmaNs).Output()
e2e.Logf("Triggerauthentication is %v", triggerauth)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("5) Create a role for reading metric from Thanos")
role := filepath.Join(buildPruningBaseDir, "role.yaml")
defer oc.AsAdmin().Run("delete").Args("-f="+role, "-n", cmaNs).Execute()
err = oc.AsAdmin().Run("create").Args("-f="+role, "-n", cmaNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("5.1) Check Role is Available")
rolecheck, err := oc.AsAdmin().Run("get").Args("Role", "-n", cmaNs).Output()
e2e.Logf("Role %v", rolecheck)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("5.2) Add the role for reading metrics from Thanos to the Service Account")
rolebinding := filepath.Join(buildPruningBaseDir, "rolebinding-52385.yaml")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f="+rolebinding, "-n", cmaNs).Execute()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f="+rolebinding, "-n", cmaNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("6) Deploy ScaledObject to enable application autoscaling")
scaledobject := filepath.Join(buildPruningBaseDir, "scaledobject-52385.yaml")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f="+scaledobject, "-n", cmaNs).Execute()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f="+scaledobject, "-n", cmaNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("6.1) Check ScaledObject is up")
err = wait.Poll(3*time.Second, 100*time.Second, func() (bool, error) {
scaledObjectStatus, _ = oc.AsAdmin().Run("get").Args("ScaledObject", "prometheus-scaledobject", "-o=jsonpath={.status.health.s0-prometheus.status}", "-n", cmaNs).Output()
if scaledObjectStatus == "Happy" {
e2e.Logf("ScaledObject is up and working")
return true, nil
}
e2e.Logf("ScaledObject is not in working status, current status: %v", scaledObjectStatus)
return false, nil
})
exutil.AssertWaitPollNoErr(err, "scaling failed")
exutil.By("prometheus scaling is up and ready")
exutil.By("7) Generate requests to test the application autoscaling")
load := filepath.Join(buildPruningBaseDir, "load-52385.yaml")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f="+load, "-n", cmaNs).Execute()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f="+load, "-n", cmaNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("7.1) Check ScaledObject is up")
err = wait.Poll(3*time.Second, 100*time.Second, func() (bool, error) {
scaledObjectStatus, _ = oc.AsAdmin().Run("get").Args("ScaledObject", "prometheus-scaledobject", "-o=jsonpath={.status.health.s0-prometheus.status}", "-n", cmaNs).Output()
if scaledObjectStatus == "Happy" {
e2e.Logf("ScaledObject is up and working")
return true, nil
}
e2e.Logf("ScaledObject is not in working status, current status: %v", scaledObjectStatus)
return false, nil
})
exutil.AssertWaitPollNoErr(err, "scaling failed")
exutil.By("prometheus scaling is up and ready")
})
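// Hedged sketch: beyond the "Happy" scaler health polled above, a stronger assertion is
// that the scaled deployment's ready replicas actually grow once load is generated. The
// function name is an assumption; it needs "strconv" in the import list.
func waitForScaleUpSketch(oc *exutil.CLI, namespace, deployment string, minReplicas int) {
	err := wait.Poll(10*time.Second, 300*time.Second, func() (bool, error) {
		out, err := oc.AsAdmin().Run("get").Args("deployment", deployment, "-n", namespace,
			"-o=jsonpath={.status.readyReplicas}").Output()
		if err != nil {
			return false, nil
		}
		replicas, _ := strconv.Atoi(strings.TrimSpace(out))
		e2e.Logf("deployment %s/%s has %d ready replicas", namespace, deployment, replicas)
		return replicas >= minReplicas, nil
	})
	exutil.AssertWaitPollNoErr(err, "deployment did not scale up under load")
}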
| |||||
test case
|
openshift/openshift-tests-private
|
994b1177-bd68-4ce1-b72c-81b6611e841e
|
Author:asahay-ConnectedOnly-Critical-73296-KEDA-Operator is missing files causing cron triggers with Timezone Failure [Serial]
|
['"fmt"', '"os"', '"path/filepath"', '"regexp"', '"strings"', '"time"', '"github.com/tidwall/sjson"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("Author:asahay-ConnectedOnly-Critical-73296-KEDA-Operator is missing files causing cron triggers with Timezone Failure [Serial]", func() {
exutil.By("Create a kedacontroller with default template")
kedaControllerDefault := filepath.Join(buildPruningBaseDir, "keda-controller-default.yaml")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", "openshift-keda", "KedaController", "keda").Execute()
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + kedaControllerDefault).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
triggerAuthenticationTempl := filepath.Join(buildPruningBaseDir, "triggerauthentication-73296.yaml")
triggerAuthentication73296 := triggerAuthenticationDescription{
secretname: "",
namespace: "",
template: triggerAuthenticationTempl,
}
cmaNs := "cma-73296"
defer deleteProject(oc, cmaNs)
createProject(oc, cmaNs)
exutil.By("1) Create OpenShift monitoring for user-defined projects")
// Look for cluster-level monitoring configuration
getOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ConfigMap", "cluster-monitoring-config", "-n", "openshift-monitoring", "--ignore-not-found").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// Enable user workload monitoring
if len(getOutput) > 0 {
exutil.By("ConfigMap cluster-monitoring-config exists, extracting cluster-monitoring-config ...")
extractOutput, _, _ := oc.AsAdmin().WithoutNamespace().Run("extract").Args("ConfigMap/cluster-monitoring-config", "-n", "openshift-monitoring", "--to=-").Outputs()
//if strings.Contains(strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(extractOutput, "'", ""), "\"", ""), " ", ""), "enableUserWorkload:true") {
cleanedOutput := strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(extractOutput, "'", ""), "\"", ""), " ", "")
e2e.Logf("cleanedOutput is %s", cleanedOutput)
if matched, _ := regexp.MatchString("enableUserWorkload:\\s*true", cleanedOutput); matched {
exutil.By("User workload is enabled, doing nothing ... ")
} else {
exutil.By("User workload is not enabled, enabling ...")
exutil.By("Get current monitoring configuration to recover")
originclusterMonitoringConfig, getContentError := oc.AsAdmin().Run("get").Args("ConfigMap/cluster-monitoring-config", "-ojson", "-n", "openshift-monitoring").Output()
o.Expect(getContentError).NotTo(o.HaveOccurred())
originclusterMonitoringConfig, getContentError = sjson.Delete(originclusterMonitoringConfig, `metadata.resourceVersion`)
o.Expect(getContentError).NotTo(o.HaveOccurred())
originclusterMonitoringConfig, getContentError = sjson.Delete(originclusterMonitoringConfig, `metadata.uid`)
o.Expect(getContentError).NotTo(o.HaveOccurred())
originclusterMonitoringConfigFilePath := filepath.Join(e2e.TestContext.OutputDir, oc.Namespace()+"-73296.json")
o.Expect(os.WriteFile(originclusterMonitoringConfigFilePath, []byte(originclusterMonitoringConfig), 0644)).NotTo(o.HaveOccurred())
defer func() {
errReplace := oc.AsAdmin().WithoutNamespace().Run("replace").Args("-f", originclusterMonitoringConfigFilePath).Execute()
o.Expect(errReplace).NotTo(o.HaveOccurred())
}()
exutil.By("Deleting current monitoring configuration")
oc.WithoutNamespace().AsAdmin().Run("delete").Args("ConfigMap/cluster-monitoring-config", "-n", "openshift-monitoring").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create my monitoring configuration")
prometheusConfigmap := filepath.Join(buildPruningBaseDir, "prometheus-configmap.yaml")
_, err = oc.WithoutNamespace().AsAdmin().Run("create").Args("-f=" + prometheusConfigmap).Output()
o.Expect(err).NotTo(o.HaveOccurred())
}
} else {
e2e.Logf("ConfigMap cluster-monitoring-config does not exist, creating ...")
prometheusConfigmap := filepath.Join(buildPruningBaseDir, "prometheus-configmap.yaml")
defer func() {
errDelete := oc.WithoutNamespace().AsAdmin().Run("delete").Args("-f=" + prometheusConfigmap).Execute()
o.Expect(errDelete).NotTo(o.HaveOccurred())
}()
_, err = oc.WithoutNamespace().AsAdmin().Run("create").Args("-f=" + prometheusConfigmap).Output()
o.Expect(err).NotTo(o.HaveOccurred())
}
exutil.By("2) Deploy application that exposes Prometheus metrics")
prometheusComsumer := filepath.Join(buildPruningBaseDir, "prometheus-comsumer-deployment.yaml")
defer oc.AsAdmin().Run("delete").Args("-f="+prometheusComsumer, "-n", cmaNs).Execute()
err = oc.AsAdmin().Run("create").Args("-f="+prometheusComsumer, "-n", cmaNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("3) Create a Service Account")
defer oc.WithoutNamespace().AsAdmin().Run("delete").Args("sa", "thanos-73296", "-n", cmaNs).Execute()
err = oc.WithoutNamespace().AsAdmin().Run("create").Args("sa", "thanos-73296", "-n", cmaNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("3.1) Create Service Account Token")
servicetokenTemp := filepath.Join(buildPruningBaseDir, "servicetoken-73296.yaml")
token, err := oc.AsAdmin().SetNamespace(cmaNs).Run("apply").Args("-f", servicetokenTemp).Output()
e2e.Logf("err %v, token %v", err, token)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("3.2) Make sure the token is still there and didn't get deleted")
serviceToken, err := oc.AsAdmin().Run("get").Args("secret", "thanos-token", "-n", cmaNs).Output()
e2e.Logf("err %v, token %v", err, serviceToken)
o.Expect(err).NotTo(o.HaveOccurred())
saTokenName := "thanos-token"
exutil.By("3.3) Define TriggerAuthentication with the Service Account's token")
triggerAuthentication73296.secretname = string(saTokenName[:])
triggerAuthentication73296.namespace = cmaNs
defer oc.AsAdmin().Run("delete").Args("-n", cmaNs, "TriggerAuthentication", "keda-trigger-auth-prometheus").Execute()
triggerAuthentication73296.create(oc)
exutil.By("4) Create a role for reading metric from Thanos")
role := filepath.Join(buildPruningBaseDir, "role.yaml")
defer oc.AsAdmin().Run("delete").Args("-f="+role, "-n", cmaNs).Execute()
err = oc.AsAdmin().Run("create").Args("-f="+role, "-n", cmaNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("5) Add the role for reading metrics from Thanos to the Service Account")
rolebinding := filepath.Join(buildPruningBaseDir, "rolebinding-73296.yaml")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f="+rolebinding, "-n", cmaNs).Execute()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f="+rolebinding, "-n", cmaNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("6) Create a Test Deployment")
testDeploymentTemp := filepath.Join(buildPruningBaseDir, "testdeployment-73296.yaml")
defer oc.AsAdmin().Run("delete").Args("-f="+testDeploymentTemp, "-n", cmaNs).Execute()
err = oc.AsAdmin().Run("create").Args("-f="+testDeploymentTemp, "-n", cmaNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("6.1) Verify the deployment is available")
errCheck := wait.Poll(20*time.Second, 280*time.Second, func() (bool, error) {
output, err1 := oc.AsAdmin().Run("get").Args("deployment", "-n", cmaNs).Output()
o.Expect(err1).NotTo(o.HaveOccurred())
if strings.Contains(output, "busybox") && strings.Contains(output, "1/1") {
e2e.Logf("Deployment has been created Sucessfully!")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Depolyment has not been created"))
exutil.By("7) Create a ScaledObject with a cron trigger with timezone applied.")
timezoneScaledObjectTemp := filepath.Join(buildPruningBaseDir, "timezonescaledobject-73296.yaml")
defer oc.AsAdmin().Run("delete").Args("-f="+timezoneScaledObjectTemp, "-n", cmaNs).Execute()
err = oc.AsAdmin().Run("create").Args("-f="+timezoneScaledObjectTemp, "-n", cmaNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("7.1) Verifying the scaledobject readiness")
errCheck = wait.Poll(20*time.Second, 380*time.Second, func() (bool, error) {
output, err := oc.AsAdmin().Run("get").Args("scaledobject", "cron-scaledobject", "-n", cmaNs, "-o", "jsonpath={.status.conditions[?(@.status=='True')].status} {.status.conditions[?(@.type=='Ready')].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if output == "True True True" {
e2e.Logf("ScaledObject is Active and Running.")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("ScaledObject is not ready"))
PodName := getPodNameByLabel(oc, "openshift-keda", "app=keda-operator")
waitPodReady(oc, "openshift-keda", "app=keda-operator")
exutil.By(" 8) Check the Logs Containig INFO Reconciling ScaledObject")
log, err := exutil.GetSpecificPodLogs(oc, "openshift-keda", "", PodName[0], "")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(log, "INFO\tReconciling ScaledObject")).Should(o.BeTrue())
})
| |||||
test case
|
openshift/openshift-tests-private
|
2fe9d1cb-2236-4d6a-896c-15a6d987a7c7
|
Author:asahay-High-60966-CMA Scale applications based on memory metrics [Serial]
|
['"fmt"', '"path/filepath"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("Author:asahay-High-60966-CMA Scale applications based on memory metrics [Serial]", func() {
exutil.By("1) Create a kedacontroller with default template")
kedaControllerDefault := filepath.Join(buildPruningBaseDir, "keda-controller-default.yaml")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", "openshift-keda", "KedaController", "keda").Execute()
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f=" + kedaControllerDefault).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
cmaNs := "cma-60966"
defer deleteProject(oc, cmaNs)
createProject(oc, cmaNs)
var output string
exutil.By("2) Creating a Keda HPA deployment")
kedaHPADemoDeploymentTemp := filepath.Join(buildPruningBaseDir, "keda-hpa-demo-deployment.yaml")
defer oc.AsAdmin().Run("delete").Args("-f="+kedaHPADemoDeploymentTemp, "-n", cmaNs).Execute()
err = oc.AsAdmin().Run("create").Args("-f="+kedaHPADemoDeploymentTemp, "-n", cmaNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("3) Verify the deployment is available")
errCheck := wait.Poll(20*time.Second, 280*time.Second, func() (bool, error) {
output, err1 := oc.AsAdmin().Run("get").Args("deployment", "-n", cmaNs).Output()
o.Expect(err1).NotTo(o.HaveOccurred())
if strings.Contains(output, "keda-hpa-demo-deployment") && strings.Contains(output, "1/1") {
e2e.Logf("Deployment has been created Sucessfully!")
return true, nil
}
return false, nil
})
e2e.Logf("Output: %v", output)
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Depolyment has not been created"))
exutil.By("4) Creating a ScaledObject")
memScaledObjectTemp := filepath.Join(buildPruningBaseDir, "mem-scaledobject.yaml")
defer oc.AsAdmin().Run("delete").Args("-f="+memScaledObjectTemp, "-n", cmaNs).Execute()
err = oc.AsAdmin().Run("create").Args("-f="+memScaledObjectTemp, "-n", cmaNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("5) Verifying the scaledobject readiness")
errCheck = wait.Poll(20*time.Second, 380*time.Second, func() (bool, error) {
output, err := oc.AsAdmin().Run("get").Args("scaledobject", "mem-scaledobject", "-n", cmaNs, "-o", "jsonpath={.status.conditions[?(@.status=='True')].status} {.status.conditions[?(@.type=='Ready')].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if output == "True True True" {
e2e.Logf("ScaledObject is Active and Running.")
return true, nil
}
return false, nil
})
e2e.Logf("Output: %v", output)
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("ScaledObject is not ready"))
exutil.By("6) Checking HPA status using jsonpath")
errCheck = wait.Poll(20*time.Second, 380*time.Second, func() (bool, error) {
output, err = oc.AsAdmin().Run("get").Args("hpa", "keda-hpa-mem-scaledobject", "-n", cmaNs, "-o", "jsonpath={.spec.minReplicas} {.spec.maxReplicas}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// The lower limit for the number of replicas to which the autoscaler can scale down is 1 and the upper limit for the number of replicas to which the autoscaler can scale up is 10
if strings.Contains(output, "1 10") {
e2e.Logf("HPA is configured correctly as expected!")
return true, nil
}
return false, nil
})
e2e.Logf("Output: %v", output)
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("HPA status check failed"))
exutil.By("7) Describing HPA to verify conditions")
errCheck = wait.Poll(20*time.Second, 380*time.Second, func() (bool, error) {
output, err = oc.AsAdmin().Run("get").Args("hpa", "keda-hpa-mem-scaledobject", "-n", cmaNs, "-o", "jsonpath={.status.conditions[?(@.type=='AbleToScale')].status} {.status.conditions[?(@.type=='ScalingActive')].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if output == "True True" {
e2e.Logf("HPA conditions are as expected: AbleToScale is True, ScalingActive is True.")
return true, nil
}
return false, nil
})
e2e.Logf("Output: %v", output)
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("HPA conditions are not met"))
})
| |||||
test case
|
openshift/openshift-tests-private
|
e4f81bff-1900-40ff-94fd-ef8bee15cb22
|
Author:weinliu-DEPRECATED-StagerunBoth-High-60991-VPA Install
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("Author:weinliu-DEPRECATED-StagerunBoth-High-60991-VPA Install", func() {
g.By("VPA operator is installed successfully")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
c98124f4-d0c1-4704-9bc6-656fc7569635
|
Author:weinliu-High-70961-Allow cluster admins to specify VPA API client rates and memory-saver [Serial]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("Author:weinliu-High-70961-Allow cluster admins to specify VPA API client rates and memory-saver [Serial]", func() {
g.By("VPA operator is installed successfully")
exutil.By("Create a new VerticalPodAutoscalerController ")
vpaNs := "openshift-vertical-pod-autoscaler"
vpacontroller := filepath.Join(buildPruningBaseDir, "vpacontroller-70961.yaml")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f="+vpacontroller, "-n", vpaNs).Execute()
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f="+vpacontroller, "-n", vpaNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check VPA operator's args")
recommenderArgs, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("VerticalPodAutoscalerController", "vpa-70961", "-n", "openshift-vertical-pod-autoscaler", "-o=jsonpath={.spec.deploymentOverrides.recommender.container.args}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect("[\"--kube-api-qps=20.0\",\"--kube-api-burst=60.0\",\"--memory-saver=true\"]").Should(o.Equal(recommenderArgs))
admissionArgs, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("VerticalPodAutoscalerController", "vpa-70961", "-n", "openshift-vertical-pod-autoscaler", "-o=jsonpath={.spec.deploymentOverrides.admission.container.args}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect("[\"--kube-api-qps=30.0\",\"--kube-api-burst=40.0\"]").Should(o.Equal(admissionArgs))
updaterArgs, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("VerticalPodAutoscalerController", "vpa-70961", "-n", "openshift-vertical-pod-autoscaler", "-o=jsonpath={.spec.deploymentOverrides.updater.container.args}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect("[\"--kube-api-qps=20.0\",\"--kube-api-burst=80.0\"]").Should(o.Equal(updaterArgs))
})
| |||||
test case
|
openshift/openshift-tests-private
|
5b74065d-4431-4ae9-8afc-5fade37490c1
|
Author:weinliu-High-70962-Allow cluster admins to specify CPU & Memory requests and limits of VPA controllers [Serial]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("Author:weinliu-High-70962-Allow cluster admins to specify CPU & Memory requests and limits of VPA controllers [Serial]", func() {
exutil.By("VPA operator is installed successfully")
exutil.By("Create a new VerticalPodAutoscalerController ")
vpaNs := "openshift-vertical-pod-autoscaler"
vpacontroller := filepath.Join(buildPruningBaseDir, "vpacontroller-70962.yaml")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f="+vpacontroller, "-n", vpaNs).Execute()
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f="+vpacontroller, "-n", vpaNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check VPA operator's args")
recommenderArgs, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("VerticalPodAutoscalerController", "vpa-70962", "-n", "openshift-vertical-pod-autoscaler", "-o=jsonpath={.spec.deploymentOverrides.recommender.container.resources.requests}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect("{\"cpu\":\"60m\",\"memory\":\"60Mi\"}").Should(o.Equal(recommenderArgs))
admissionArgs, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("VerticalPodAutoscalerController", "vpa-70962", "-n", "openshift-vertical-pod-autoscaler", "-o=jsonpath={.spec.deploymentOverrides.admission.container.resources.requests}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect("{\"cpu\":\"40m\",\"memory\":\"40Mi\"}").Should(o.Equal(admissionArgs))
updaterArgs, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("VerticalPodAutoscalerController", "vpa-70962", "-n", "openshift-vertical-pod-autoscaler", "-o=jsonpath={.spec.deploymentOverrides.updater.container.resources.requests}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect("{\"cpu\":\"80m\",\"memory\":\"80Mi\"}").Should(o.Equal(updaterArgs))
})
| |||||
test case
|
openshift/openshift-tests-private
|
1187a2e2-1be6-4581-a596-9f807ed61dac
|
Author:asahay-StagerunBoth-High-27070-Cluster Resource Override Operator. [Serial]
|
['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("Author:asahay-StagerunBoth-High-27070-Cluster Resource Override Operator. [Serial]", func() {
defer deleteAPIService(oc)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ClusterResourceOverride", "cluster", "-n", "clusterresourceoverride-operator").Execute()
createCRClusterresourceoverride(oc)
var err error
var croCR string
errCheck := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
croCR, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("ClusterResourceOverride", "cluster", "-n", "clusterresourceoverride-operator").Output()
if err != nil {
e2e.Logf("error %v, please try next round", err)
return false, nil
}
if !strings.Contains(croCR, "cluster") {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("can not get cluster with output %v, the error is %v", croCR, err))
e2e.Logf("Operator is installed successfully")
})
| |||||
test case
|
openshift/openshift-tests-private
|
5df8dddb-99da-40f3-95c7-3442be1722c7
|
Author:asahay-Medium-27075-Testing the config changes. [Serial]
|
['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node.go
|
g.It("Author:asahay-Medium-27075-Testing the config changes. [Serial]", func() {
defer deleteAPIService(oc)
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ClusterResourceOverride", "cluster").Execute()
createCRClusterresourceoverride(oc)
var err error
var croCR string
errCheck := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
croCR, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("ClusterResourceOverride", "cluster", "-n", "clusterresourceoverride-operator").Output()
if err != nil {
e2e.Logf("error %v, please try next round", err)
return false, nil
}
if !strings.Contains(croCR, "cluster") {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("can not get cluster with output %v, the error is %v", croCR, err))
e2e.Logf("Operator is installed successfully")
g.By("Testing the changes\n")
testCRClusterresourceoverride(oc)
})
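// Hedged sketch (not the repo's testCRClusterresourceoverride): the override admission
// webhook rewrites container requests in namespaces that opt in via the label below, so a
// config check can read the mutated requests back from a pod created with only limits set.
// The label key follows the operator's documented behavior, but treat it and the function
// name as assumptions here.
func verifyOverrideAppliedSketch(oc *exutil.CLI, namespace, podName string) {
	_, err := oc.AsAdmin().WithoutNamespace().Run("label").Args("namespace", namespace,
		"clusterresourceoverride.admission.autoscaling.openshift.io/enabled=true", "--overwrite").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	// A pod (re)created in this namespace with cpu/memory limits should have had its
	// requests rewritten by the webhook according to the configured ratios.
	requests, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", podName, "-n", namespace,
		"-o=jsonpath={.spec.containers[0].resources.requests}").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	e2e.Logf("overridden requests: %s", requests)
	o.Expect(requests).NotTo(o.BeEmpty())
}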
| |||||
test
|
openshift/openshift-tests-private
|
76d99cf7-23a1-4061-89b4-9a73a17bb5fe
|
node_utils
|
import (
"context"
"fmt"
"io/ioutil"
"math/rand"
"os/exec"
"path/filepath"
"reflect"
"regexp"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/tidwall/pretty"
"github.com/tidwall/sjson"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)
|
github.com/openshift/openshift-tests-private/test/extended/node/node_utils.go
|
package node
import (
"context"
"fmt"
"io/ioutil"
"math/rand"
"os/exec"
"path/filepath"
"reflect"
"regexp"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/tidwall/pretty"
"github.com/tidwall/sjson"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)
type cpuPerfProfile struct {
name string
isolated string
template string
}
type liveProbeTermPeriod struct {
name string
namespace string
terminationgrace int
probeterminationgrace int
template string
}
type startProbeTermPeriod struct {
name string
namespace string
terminationgrace int
probeterminationgrace int
template string
}
type readProbeTermPeriod struct {
name string
namespace string
terminationgrace int
probeterminationgrace int
template string
}
type liveProbeNoTermPeriod struct {
name string
namespace string
terminationgrace int
template string
}
type podWkloadCpuNoAnotation struct {
name string
namespace string
workloadcpu string
template string
}
type podWkloadCpuDescription struct {
name string
namespace string
workloadcpu string
template string
}
type podNoWkloadCpuDescription struct {
name string
namespace string
template string
}
type podGuDescription struct {
name string
namespace string
nodename string
template string
}
type podHelloDescription struct {
name string
namespace string
template string
}
type podModifyDescription struct {
name string
namespace string
mountpath string
command string
args string
restartPolicy string
user string
role string
level string
template string
}
type podLivenessProbe struct {
name string
namespace string
overridelivenessgrace string
terminationgrace int
failurethreshold int
periodseconds int
template string
}
type kubeletCfgMaxpods struct {
name string
labelkey string
labelvalue string
maxpods int
template string
}
type ctrcfgDescription struct {
namespace string
pidlimit int
loglevel string
overlay string
logsizemax string
command string
configFile string
template string
}
type objectTableRefcscope struct {
kind string
name string
}
type podTerminationDescription struct {
name string
namespace string
template string
}
type podInitConDescription struct {
name string
namespace string
template string
}
type podSigstoreDescription struct {
name string
namespace string
template string
}
type podUserNSDescription struct {
name string
namespace string
template string
}
type podSleepDescription struct {
namespace string
template string
}
type kubeletConfigDescription struct {
name string
labelkey string
labelvalue string
template string
}
type memHogDescription struct {
name string
namespace string
labelkey string
labelvalue string
template string
}
type podTwoContainersDescription struct {
name string
namespace string
template string
}
type ctrcfgOverlayDescription struct {
name string
overlay string
template string
}
type podDevFuseDescription struct {
name string
namespace string
template string
}
type podLogLinkDescription struct {
name string
namespace string
template string
}
type podWASM struct {
name string
namespace string
template string
}
type podCpuLoadBalance struct {
name string
namespace string
runtimeclass string
template string
}
type podDisruptionBudget struct {
name string
namespace string
minAvailable string
template string
}
type deployment struct {
name string
namespace string
replicas string
image string
nodename string
template string
}
type triggerAuthenticationDescription struct {
secretname string
namespace string
template string
}
type ImgConfigContDescription struct {
name string
template string
}
type subscriptionDescription struct {
catalogSourceName string
}
func (cpuPerfProfile *cpuPerfProfile) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", cpuPerfProfile.template, "-p", "NAME="+cpuPerfProfile.name, "ISOLATED="+cpuPerfProfile.isolated)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (cpuPerfProfile *cpuPerfProfile) delete(oc *exutil.CLI) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("PerformanceProfile", cpuPerfProfile.name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
func (podCpuLoadBalance *podCpuLoadBalance) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", podCpuLoadBalance.template, "-p", "NAME="+podCpuLoadBalance.name, "NAMESPACE="+podCpuLoadBalance.namespace, "RUNTIMECLASS="+podCpuLoadBalance.runtimeclass)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (podCpuLoadBalance *podCpuLoadBalance) delete(oc *exutil.CLI) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", podCpuLoadBalance.namespace, "pod", podCpuLoadBalance.name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
func (podWASM *podWASM) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", podWASM.template, "-p", "NAME="+podWASM.name, "NAMESPACE="+podWASM.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (podWASM *podWASM) delete(oc *exutil.CLI) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", podWASM.namespace, "pod", podWASM.name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
func (podDevFuse *podDevFuseDescription) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", podDevFuse.template, "-p", "NAME="+podDevFuse.name, "NAMESPACE="+podDevFuse.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (podDevFuse *podDevFuseDescription) delete(oc *exutil.CLI) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", podDevFuse.namespace, "pod", podDevFuse.name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
func checkDevFuseMount(oc *exutil.CLI, namespace string, podname string) error {
return wait.Poll(1*time.Second, 3*time.Second, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("rsh").Args("-n", namespace, podname, "/bin/bash", "-c", "ls -al /dev | grep fuse").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(status, "fuse") {
e2e.Logf("\ndev fuse is mounted inside the pod")
return true, nil
}
return false, nil
})
}
func (podLogLink *podLogLinkDescription) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", podLogLink.template, "-p", "NAME="+podLogLink.name, "NAMESPACE="+podLogLink.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (podLogLink *podLogLinkDescription) delete(oc *exutil.CLI) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", podLogLink.namespace, "pod", podLogLink.name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
func (liveProbe *liveProbeTermPeriod) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", liveProbe.template, "-p", "NAME="+liveProbe.name, "NAMESPACE="+liveProbe.namespace, "TERMINATIONGRACE="+strconv.Itoa(liveProbe.terminationgrace), "PROBETERMINATIONGRACE="+strconv.Itoa(liveProbe.probeterminationgrace))
o.Expect(err).NotTo(o.HaveOccurred())
}
func (liveProbe *liveProbeTermPeriod) delete(oc *exutil.CLI) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", liveProbe.namespace, "pod", liveProbe.name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
func (startProbe *startProbeTermPeriod) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", startProbe.template, "-p", "NAME="+startProbe.name, "NAMESPACE="+startProbe.namespace, "TERMINATIONGRACE="+strconv.Itoa(startProbe.terminationgrace), "PROBETERMINATIONGRACE="+strconv.Itoa(startProbe.probeterminationgrace))
o.Expect(err).NotTo(o.HaveOccurred())
}
func (startProbe *startProbeTermPeriod) delete(oc *exutil.CLI) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", startProbe.namespace, "pod", startProbe.name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
func (readProbe *readProbeTermPeriod) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", readProbe.template, "-p", "NAME="+readProbe.name, "NAMESPACE="+readProbe.namespace, "TERMINATIONGRACE="+strconv.Itoa(readProbe.terminationgrace), "PROBETERMINATIONGRACE="+strconv.Itoa(readProbe.probeterminationgrace))
o.Expect(err).NotTo(o.HaveOccurred())
}
func (readProbe *readProbeTermPeriod) delete(oc *exutil.CLI) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", readProbe.namespace, "pod", readProbe.name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
func (liveProbe *liveProbeNoTermPeriod) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", liveProbe.template, "-p", "NAME="+liveProbe.name, "NAMESPACE="+liveProbe.namespace, "TERMINATIONGRACE="+strconv.Itoa(liveProbe.terminationgrace))
o.Expect(err).NotTo(o.HaveOccurred())
}
func (liveProbe *liveProbeNoTermPeriod) delete(oc *exutil.CLI) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", liveProbe.namespace, "pod", liveProbe.name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
func (podNoWkloadCpu *podNoWkloadCpuDescription) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", podNoWkloadCpu.template, "-p", "NAME="+podNoWkloadCpu.name, "NAMESPACE="+podNoWkloadCpu.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (podNoWkloadCpu *podNoWkloadCpuDescription) delete(oc *exutil.CLI) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", podNoWkloadCpu.namespace, "pod", podNoWkloadCpu.name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
func (podGu *podGuDescription) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", podGu.template, "-p", "NAME="+podGu.name, "NAMESPACE="+podGu.namespace, "NODENAME="+podGu.nodename)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (podGu *podGuDescription) delete(oc *exutil.CLI) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", podGu.namespace, "pod", podGu.name, "--ignore-not-found").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
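// getWorkersList returns the names of all nodes labeled node-role.kubernetes.io/worker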
func getWorkersList(oc *exutil.CLI) []string {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-l", "node-role.kubernetes.io/worker", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
return strings.Fields(output)
}
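// getCpuNum returns the CPU capacity of the given node as an integer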
func getCpuNum(oc *exutil.CLI, node string) int {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", node, "-o=jsonpath={.status.capacity.cpu}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
cpuNum, err := strconv.Atoi(output)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Cpu num is: [%d]\n", cpuNum)
return cpuNum
}
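// getCgroupVersion returns the cgroup filesystem type on the first worker node:
// tmpfs for cgroup v1, cgroup2fs for cgroup v2, or the raw debug output if neither matches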
func getCgroupVersion(oc *exutil.CLI) string {
workerNodes := getWorkersList(oc)
cgroupV, err := exutil.DebugNodeWithChroot(oc, workerNodes[0], "/bin/bash", "-c", "stat -fc %T /sys/fs/cgroup")
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("cgroup version info is: [%v]\n", cgroupV)
if strings.Contains(string(cgroupV), "tmpfs") {
return "tmpfs"
} else if strings.Contains(string(cgroupV), "cgroup2fs") {
return "cgroup2fs"
} else {
return cgroupV
}
}
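// checkReservedCpu verifies that reservedSystemCPUs in kubelet.conf and the crio process
// CPU affinity match the expected CPU set on every ready worker node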
func checkReservedCpu(oc *exutil.CLI, reservedCpu string) {
workerNodes := getWorkersList(oc)
err := wait.Poll(3*time.Second, 15*time.Second, func() (bool, error) {
for _, node := range workerNodes {
nodeStatus, statusErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", node, "-o=jsonpath={.status.conditions[?(@.type=='Ready')].status}").Output()
o.Expect(statusErr).NotTo(o.HaveOccurred())
e2e.Logf("\nNode %s Status is %s\n", node, nodeStatus)
if nodeStatus == "True" {
kubeletConf, err := exutil.DebugNodeWithChroot(oc, node, "/bin/bash", "-c", "cat /etc/kubernetes/kubelet.conf | grep reservedSystemCPUs")
o.Expect(err).NotTo(o.HaveOccurred())
//need match : "reservedSystemCPUs": "0-3"
cpuStr := `"reservedSystemCPUs": "` + reservedCpu + `"`
if strings.Contains(string(kubeletConf), cpuStr) {
e2e.Logf("Reserved Cpu: [%s], is expected \n", kubeletConf)
crioOutput, err := exutil.DebugNodeWithChroot(oc, node, "/bin/bash", "-c", "pgrep crio | while read i; do taskset -cp $i; done")
o.Expect(err).NotTo(o.HaveOccurred())
crioCpuStr := "current affinity list: " + reservedCpu
if strings.Contains(crioOutput, crioCpuStr) {
e2e.Logf("crio use CPU: [%s], is expected \n", crioOutput)
return true, nil
} else {
e2e.Logf("crio use CPU: [%s], not expected \n", crioOutput)
return false, nil
}
} else {
e2e.Logf("Reserved Cpu: [%s], not expected \n", kubeletConf)
return false, nil
}
} else {
e2e.Logf("\n NODE %s IS NOT READY\n", node)
}
}
return false, nil
})
	exutil.AssertWaitPollNoErr(err, "Check reservedCpu failed!")
}
// deployment generator function (uses default image from template)
func NewDeployment(name, namespace, replicas, template string) *deployment {
return &deployment{name, namespace, replicas, "", "", template}
}
// deployment generator function (uses default image from template and deploys on a specific node)
func NewDeploymentWithNode(name, namespace, replicas, hostname, template string) *deployment {
return &deployment{name, namespace, replicas, "", hostname, template}
}
// deployment generator function with image override
func NewDeploymentWithImage(name, namespace, replicas, image, template string) *deployment {
return &deployment{name, namespace, replicas, image, "", template}
}
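
// Illustrative usage sketch only (nothing in this file calls it); the oc client,
// namespace and template path are assumed to come from the calling test case:
//
//	workload := NewDeployment("hello-deploy", oc.Namespace(), "2", deployTemplate)
//	workload.create(oc)
//	workload.waitForCreation(oc, 5)
//	defer workload.delete(oc)
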
// this function assumes the image and nodename parameters cannot be set at the same time
func (deployment *deployment) create(oc *exutil.CLI) {
imageArg := ""
nodenameArg := ""
if deployment.image != "" {
imageArg = "IMAGE=" + deployment.image
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", deployment.template, "-p", "NAME="+deployment.name, "NAMESPACE="+deployment.namespace, "REPLICAS="+deployment.replicas, imageArg)
o.Expect(err).NotTo(o.HaveOccurred())
} else if deployment.nodename != "" {
nodenameArg = "NODENAME=" + deployment.nodename
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", deployment.template, "-p", "NAME="+deployment.name, "NAMESPACE="+deployment.namespace, "REPLICAS="+deployment.replicas, nodenameArg)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", deployment.template, "-p", "NAME="+deployment.name, "NAMESPACE="+deployment.namespace, "REPLICAS="+deployment.replicas)
o.Expect(err).NotTo(o.HaveOccurred())
}
}
// waits until all the pods in the created deployment are in Ready state
func (deployment *deployment) waitForCreation(oc *exutil.CLI, timeoutMin int) {
err := wait.Poll(3*time.Second, time.Duration(timeoutMin)*time.Minute, func() (bool, error) {
msg, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment", deployment.name, "-o=jsonpath={.status.readyReplicas}", "-n", deployment.namespace).Output()
if err != nil {
e2e.Logf("Command failed with error: %s .... there are no ready workloads", err)
return false, nil
}
if (msg == "" && deployment.replicas == "0") || msg == deployment.replicas {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to create deployment %v in namespace %v", deployment.name, deployment.namespace))
}
func (deployment *deployment) delete(oc *exutil.CLI) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", deployment.namespace, "deployment", deployment.name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
// PDB generator function
func NewPDB(name, namespace, minAvailable, template string) *podDisruptionBudget {
return &podDisruptionBudget{name, namespace, minAvailable, template}
}
func (pdb *podDisruptionBudget) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", pdb.template, "-p", "NAME="+pdb.name, "NAMESPACE="+pdb.namespace, "MIN_AVAILABLE="+pdb.minAvailable)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (pdb *podDisruptionBudget) delete(oc *exutil.CLI) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", pdb.namespace, "poddisruptionbudget", pdb.name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
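
// Illustrative usage sketch only (nothing in this file calls it); the namespace and
// template path are assumptions taken from the calling test case:
//
//	pdb := NewPDB("test-pdb", oc.Namespace(), "1", pdbTemplate)
//	pdb.create(oc)
//	defer pdb.delete(oc)
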
type cmaKedaControllerDescription struct {
level string
template string
name string
namespace string
}
type pvcKedaControllerDescription struct {
level string
template string
name string
namespace string
watchNamespace string
}
type runtimeTimeoutDescription struct {
name string
labelkey string
labelvalue string
template string
}
type systemReserveESDescription struct {
name string
labelkey string
labelvalue string
template string
}
type upgradeMachineconfig1Description struct {
name string
template string
}
type upgradeMachineconfig2Description struct {
name string
template string
}
func (podWkloadCpu *podWkloadCpuDescription) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", podWkloadCpu.template, "-p", "NAME="+podWkloadCpu.name, "NAMESPACE="+podWkloadCpu.namespace, "WORKLOADCPU="+podWkloadCpu.workloadcpu)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (podWkloadCpu *podWkloadCpuDescription) delete(oc *exutil.CLI) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", podWkloadCpu.namespace, "pod", podWkloadCpu.name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
func (podWkloadCpuNoAnota *podWkloadCpuNoAnotation) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", podWkloadCpuNoAnota.template, "-p", "NAME="+podWkloadCpuNoAnota.name, "NAMESPACE="+podWkloadCpuNoAnota.namespace, "WORKLOADCPU="+podWkloadCpuNoAnota.workloadcpu)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (podWkloadCpuNoAnota *podWkloadCpuNoAnotation) delete(oc *exutil.CLI) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", podWkloadCpuNoAnota.namespace, "pod", podWkloadCpuNoAnota.name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
func (podHello *podHelloDescription) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", podHello.template, "-p", "NAME="+podHello.name, "NAMESPACE="+podHello.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (podHello *podHelloDescription) delete(oc *exutil.CLI) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", podHello.namespace, "pod", podHello.name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
func (ctrcfg *ctrcfgOverlayDescription) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", ctrcfg.template, "-p", "NAME="+ctrcfg.name, "OVERLAY="+ctrcfg.overlay)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (podUserNS *podUserNSDescription) createPodUserNS(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", podUserNS.template, "-p", "NAME="+podUserNS.name, "NAMESPACE="+podUserNS.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (podUserNS *podUserNSDescription) deletePodUserNS(oc *exutil.CLI) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", podUserNS.namespace, "pod", podUserNS.name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
func (kubeletConfig *kubeletConfigDescription) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", kubeletConfig.template, "-p", "NAME="+kubeletConfig.name, "LABELKEY="+kubeletConfig.labelkey, "LABELVALUE="+kubeletConfig.labelvalue)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (memHog *memHogDescription) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", memHog.template, "-p", "NAME="+memHog.name, "LABELKEY="+memHog.labelkey, "LABELVALUE="+memHog.labelvalue)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (podSleep *podSleepDescription) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", podSleep.template, "-p", "NAMESPACE="+podSleep.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (runtimeTimeout *runtimeTimeoutDescription) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", runtimeTimeout.template, "-p", "NAME="+runtimeTimeout.name, "LABELKEY="+runtimeTimeout.labelkey, "LABELVALUE="+runtimeTimeout.labelvalue)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (runtimeTimeout *runtimeTimeoutDescription) delete(oc *exutil.CLI) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("kubeletconfig", runtimeTimeout.name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
func (systemReserveES *systemReserveESDescription) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", systemReserveES.template, "-p", "NAME="+systemReserveES.name, "LABELKEY="+systemReserveES.labelkey, "LABELVALUE="+systemReserveES.labelvalue)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (systemReserveES *systemReserveESDescription) delete(oc *exutil.CLI) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("kubeletconfig", systemReserveES.name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
func (upgradeMachineconfig1 *upgradeMachineconfig1Description) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", upgradeMachineconfig1.template, "-p", "NAME="+upgradeMachineconfig1.name)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (upgradeMachineconfig1 *upgradeMachineconfig1Description) delete(oc *exutil.CLI) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("kubeletconfig", upgradeMachineconfig1.name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
func (upgradeMachineconfig2 *upgradeMachineconfig2Description) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", upgradeMachineconfig2.template, "-p", "NAME="+upgradeMachineconfig2.name)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (upgradeMachineconfig2 *upgradeMachineconfig2Description) delete(oc *exutil.CLI) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("kubeletconfig", upgradeMachineconfig2.name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
// Delete Namespace with all resources
func (podSleep *podSleepDescription) deleteProject(oc *exutil.CLI) error {
e2e.Logf("Deleting Project ...")
return oc.AsAdmin().WithoutNamespace().Run("delete").Args("project", podSleep.namespace).Execute()
}
func (podInitCon *podInitConDescription) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", podInitCon.template, "-p", "NAME="+podInitCon.name, "NAMESPACE="+podInitCon.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (podInitCon *podInitConDescription) delete(oc *exutil.CLI) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", podInitCon.namespace, "pod", podInitCon.name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
func (podSigstore *podSigstoreDescription) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", podSigstore.template, "-p", "NAME="+podSigstore.name, "NAMESPACE="+podSigstore.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (podSigstore *podSigstoreDescription) delete(oc *exutil.CLI) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", podSigstore.namespace, "pod", podSigstore.name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
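// getRandomString returns a random 8-character lowercase alphanumeric string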
func getRandomString() string {
chars := "abcdefghijklmnopqrstuvwxyz0123456789"
seed := rand.New(rand.NewSource(time.Now().UnixNano()))
buffer := make([]byte, 8)
for index := range buffer {
buffer[index] = chars[seed.Intn(len(chars))]
}
return string(buffer)
}
func (kubeletcfg *kubeletCfgMaxpods) createKubeletConfigMaxpods(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", kubeletcfg.template, "-p", "NAME="+kubeletcfg.name, "LABELKEY="+kubeletcfg.labelkey, "LABELVALUE="+kubeletcfg.labelvalue, "MAXPODS="+strconv.Itoa(kubeletcfg.maxpods))
if err != nil {
e2e.Logf("the err of createKubeletConfigMaxpods:%v", err)
}
o.Expect(err).NotTo(o.HaveOccurred())
}
func (kubeletcfg *kubeletCfgMaxpods) deleteKubeletConfigMaxpods(oc *exutil.CLI) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("kubeletconfig", kubeletcfg.name).Execute()
if err != nil {
e2e.Logf("the err of deleteKubeletConfigMaxpods:%v", err)
}
o.Expect(err).NotTo(o.HaveOccurred())
}
func (pod *podLivenessProbe) createPodLivenessProbe(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", pod.template, "-p", "NAME="+pod.name, "NAMESPACE="+pod.namespace, "OVERRIDELIVENESSGRACE="+pod.overridelivenessgrace, "TERMINATIONGRACE="+strconv.Itoa(pod.terminationgrace), "FAILURETHRESHOLD="+strconv.Itoa(pod.failurethreshold), "PERIODSECONDS="+strconv.Itoa(pod.periodseconds))
if err != nil {
e2e.Logf("the err of createPodLivenessProbe:%v", err)
}
o.Expect(err).NotTo(o.HaveOccurred())
}
func (pod *podLivenessProbe) deletePodLivenessProbe(oc *exutil.CLI) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", pod.namespace, "pod", pod.name).Execute()
if err != nil {
e2e.Logf("the err of deletePodLivenessProbe:%v", err)
}
o.Expect(err).NotTo(o.HaveOccurred())
}
func (podModify *podModifyDescription) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", podModify.template, "-p", "NAME="+podModify.name, "NAMESPACE="+podModify.namespace, "MOUNTPATH="+podModify.mountpath, "COMMAND="+podModify.command, "ARGS="+podModify.args, "POLICY="+podModify.restartPolicy, "USER="+podModify.user, "ROLE="+podModify.role, "LEVEL="+podModify.level)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (podModify *podModifyDescription) delete(oc *exutil.CLI) error {
return oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", podModify.namespace, "pod", podModify.name).Execute()
}
func (podTermination *podTerminationDescription) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", podTermination.template, "-p", "NAME="+podTermination.name, "NAMESPACE="+podTermination.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (podTermination *podTerminationDescription) delete(oc *exutil.CLI) error {
return oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", podTermination.namespace, "pod", podTermination.name).Execute()
}
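// createResourceFromTemplate processes the template parameters with `oc process`,
// writes the result to a temporary json file and creates it with `oc create -f`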
func createResourceFromTemplate(oc *exutil.CLI, parameters ...string) error {
var jsonCfg string
err := wait.Poll(3*time.Second, 15*time.Second, func() (bool, error) {
output, err := oc.AsAdmin().Run("process").Args(parameters...).OutputToFile(getRandomString() + "node-config.json")
if err != nil {
			e2e.Failf("failed to process the template: %v", err)
return false, nil
}
jsonCfg = output
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to process %v", parameters))
e2e.Logf("The resource is %s", jsonCfg)
return oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", jsonCfg).Execute()
}
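// podStatusReason waits until an init container in the current test namespace reports the waiting reason CrashLoopBackOff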
func podStatusReason(oc *exutil.CLI) error {
e2e.Logf("check if pod is available")
return wait.Poll(5*time.Second, 3*time.Minute, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[*].status.initContainerStatuses[*].state.waiting.reason}", "-n", oc.Namespace()).Output()
e2e.Logf("the status of pod is %v", status)
if err != nil {
			e2e.Failf("failed to get the pod status: %v", err)
return false, nil
}
if strings.Contains(status, "CrashLoopBackOff") {
e2e.Logf(" Pod failed status reason is :%s", status)
return true, nil
}
return false, nil
})
}
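// podStatusterminatedReason waits until an init container in the current test namespace reports the terminated reason Error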
func podStatusterminatedReason(oc *exutil.CLI) error {
e2e.Logf("check if pod is available")
return wait.Poll(5*time.Second, 3*time.Minute, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[*].status.initContainerStatuses[*].state.terminated.reason}", "-n", oc.Namespace()).Output()
e2e.Logf("the status of pod is %v", status)
if err != nil {
			e2e.Failf("failed to get the pod status: %v", err)
return false, nil
}
if strings.Contains(status, "Error") {
e2e.Logf(" Pod failed status reason is :%s", status)
return true, nil
}
return false, nil
})
}
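// podStatus waits until the given pod reports the Ready condition as True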
func podStatus(oc *exutil.CLI, namespace string, podName string) error {
e2e.Logf("check if pod is available")
return wait.Poll(5*time.Second, 3*time.Minute, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", podName, "-o=jsonpath={.status.conditions[?(@.type=='Ready')].status}", "-n", namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(status, "True") {
e2e.Logf("Pod is running and container is Ready!")
return true, nil
}
return false, nil
})
}
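// podEvent polls the events of the current test namespace until one matches the keyword regexp or the timeout (in seconds) expires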
func podEvent(oc *exutil.CLI, timeout int, keyword string) error {
return wait.Poll(10*time.Second, time.Duration(timeout)*time.Second, func() (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("events", "-n", oc.Namespace()).Output()
if err != nil {
e2e.Logf("Can't get events from test project, error: %s. Trying again", err)
return false, nil
}
if matched, _ := regexp.MatchString(keyword, output); matched {
e2e.Logf(keyword)
return true, nil
}
return false, nil
})
}
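// kubeletNotPromptDupErr verifies that the keyword appears exactly once in the status conditions
// of the given kubeletconfig, i.e. the error message is not duplicated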
func kubeletNotPromptDupErr(oc *exutil.CLI, keyword string, name string) error {
return wait.Poll(10*time.Second, 3*time.Minute, func() (bool, error) {
re := regexp.MustCompile(keyword)
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("kubeletconfig", name, "-o=jsonpath={.status.conditions[*]}").Output()
if err != nil {
e2e.Logf("Can't get kubeletconfig status, error: %s. Trying again", err)
return false, nil
}
found := re.FindAllString(output, -1)
if lenStr := len(found); lenStr > 1 {
			e2e.Logf("[%s] appears %d times.", keyword, lenStr)
return false, nil
} else if lenStr == 1 {
			e2e.Logf("[%s] appears %d time.\nkubeletconfig does not prompt a duplicate error message", keyword, lenStr)
return true, nil
} else {
			e2e.Logf("error: kubeletconfig did not prompt [%s]", keyword)
return false, nil
}
})
}
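// volStatus verifies that the shared volume in the init-volume pod contains the content written by the init container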
func volStatus(oc *exutil.CLI) error {
e2e.Logf("check content of volume")
return wait.Poll(1*time.Second, 1*time.Minute, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args("init-volume", "-c", "hello-pod", "cat", "/init-test/volume-test", "-n", oc.Namespace()).Output()
e2e.Logf("The content of the vol is %v", status)
if err != nil {
			e2e.Failf("failed to read the volume content: %v", err)
return false, nil
}
if strings.Contains(status, "This is OCP volume test") {
e2e.Logf(" Init containers with volume work fine \n")
return true, nil
}
return false, nil
})
}
// ContainerSccStatus get scc status of container
func ContainerSccStatus(oc *exutil.CLI) error {
return wait.Poll(1*time.Second, 1*time.Second, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "hello-pod", "-o=jsonpath={.spec.securityContext.seLinuxOptions.*}", "-n", oc.Namespace()).Output()
e2e.Logf("The Container SCC Content is %v", status)
if err != nil {
			e2e.Failf("failed to get the pod seLinuxOptions: %v", err)
return false, nil
}
if strings.Contains(status, "unconfined_u unconfined_r s0:c25,c968") {
			e2e.Logf("SeLinuxOptions in pod applied to container successfully \n")
return true, nil
}
return false, nil
})
}
func (ctrcfg *ctrcfgDescription) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", ctrcfg.template, "-p", "LOGLEVEL="+ctrcfg.loglevel, "OVERLAY="+ctrcfg.overlay, "LOGSIZEMAX="+ctrcfg.logsizemax)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (ctrcfg *ctrcfgDescription) checkCtrcfgParameters(oc *exutil.CLI) error {
return wait.Poll(10*time.Minute, 11*time.Minute, func() (bool, error) {
nodeName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "--selector=node-role.kubernetes.io/worker=", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\nNode Names are %v", nodeName)
node := strings.Fields(nodeName)
for _, v := range node {
			// check the status of the Ready condition rather than the type at a fixed index
			nodeStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", fmt.Sprintf("%s", v), "-o=jsonpath={.status.conditions[?(@.type=='Ready')].status}").Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			e2e.Logf("\nNode %s Ready status is %s\n", v, nodeStatus)
			if nodeStatus == "True" {
criostatus, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args(`node/`+fmt.Sprintf("%s", v), "--", "chroot", "/host", "crio", "config").OutputToFile("crio.conf")
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf(`\nCRI-O PARAMETER ON THE WORKER NODE :` + fmt.Sprintf("%s", v))
e2e.Logf("\ncrio config file path is %v", criostatus)
wait.Poll(2*time.Second, 1*time.Minute, func() (bool, error) {
result, err1 := exec.Command("bash", "-c", "cat "+criostatus+" | egrep 'pids_limit|log_level'").Output()
					if err1 != nil {
						e2e.Failf("failed to read the crio config: %v", err1)
return false, nil
}
e2e.Logf("\nCtrcfg Parameters is %s", result)
if strings.Contains(string(result), "debug") && strings.Contains(string(result), "2048") {
e2e.Logf("\nCtrcfg parameter pod limit and log_level configured successfully")
return true, nil
}
return false, nil
})
} else {
e2e.Logf("\n NODES ARE NOT READY\n ")
}
}
return true, nil
})
}
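// getTerminationGrace verifies that the systemd scope of the pod's container reports the expected TimeoutStopUSec (termination grace period)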
func (podTermination *podTerminationDescription) getTerminationGrace(oc *exutil.CLI) error {
e2e.Logf("check terminationGracePeriodSeconds period")
return wait.Poll(1*time.Second, 1*time.Minute, func() (bool, error) {
nodename, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[0].spec.nodeName}", "-n", podTermination.namespace).Output()
e2e.Logf("The nodename is %v", nodename)
o.Expect(err).NotTo(o.HaveOccurred())
nodeReadyBool, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", fmt.Sprintf("%s", nodename), "-o=jsonpath={.status.conditions[?(@.reason=='KubeletReady')].status}").Output()
e2e.Logf("The Node Ready status is %v", nodeReadyBool)
o.Expect(err).NotTo(o.HaveOccurred())
containerID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[0].status.containerStatuses[0].containerID}", "-n", podTermination.namespace).Output()
e2e.Logf("The containerID is %v", containerID)
o.Expect(err).NotTo(o.HaveOccurred())
if nodeReadyBool == "True" {
terminationGrace, err := exutil.DebugNodeWithChroot(oc, nodename, "systemctl", "show", containerID)
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(string(terminationGrace), "TimeoutStopUSec=1min 30s") {
e2e.Logf("\nTERMINATION GRACE PERIOD IS SET CORRECTLY")
return true, nil
}
e2e.Logf("\ntermination grace is NOT Updated")
return false, nil
}
return false, nil
})
}
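// containerExit waits until the pod's init container terminates with reason Completed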
func (podInitCon *podInitConDescription) containerExit(oc *exutil.CLI) error {
return wait.Poll(2*time.Second, 2*time.Minute, func() (bool, error) {
initConStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[0].status.initContainerStatuses[0].state.terminated.reason}", "-n", podInitCon.namespace).Output()
e2e.Logf("The initContainer status is %v", initConStatus)
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(string(initConStatus), "Completed") {
			e2e.Logf("The initContainer exited normally")
			return true, nil
		}
		e2e.Logf("The initContainer did not exit!")
return false, nil
})
}
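// deleteInitContainer removes the pod's init container on its node with crictl and returns the command output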
func (podInitCon *podInitConDescription) deleteInitContainer(oc *exutil.CLI) (string, error) {
nodename, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[0].spec.nodeName}", "-n", podInitCon.namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
containerID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[0].status.initContainerStatuses[0].containerID}", "-n", podInitCon.namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The containerID is %v", containerID)
initContainerID := string(containerID)[8:]
e2e.Logf("The initContainerID is %s", initContainerID)
return exutil.DebugNodeWithChroot(oc, fmt.Sprintf("%s", nodename), "crictl", "rm", initContainerID)
}
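// initContainerNotRestart checks the marker file /mnt/data/test inside the pod to verify that the init container did not restart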
func (podInitCon *podInitConDescription) initContainerNotRestart(oc *exutil.CLI) error {
return wait.Poll(3*time.Minute, 6*time.Minute, func() (bool, error) {
re := regexp.MustCompile("running")
podname, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[0].metadata.name}", "-n", podInitCon.namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
output, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args(string(podname), "-n", podInitCon.namespace, "--", "cat", "/mnt/data/test").Output()
e2e.Logf("The /mnt/data/test: %s", output)
o.Expect(err).NotTo(o.HaveOccurred())
found := re.FindAllString(output, -1)
if lenStr := len(found); lenStr > 1 {
			e2e.Logf("initContainer restarted %d times.", (lenStr - 1))
return false, nil
} else if lenStr == 1 {
			e2e.Logf("initContainer did not restart")
return true, nil
}
return false, nil
})
}
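// checkNodeStatus waits until the given node reaches the expected status (Ready or NotReady)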
func checkNodeStatus(oc *exutil.CLI, nodeName string, expectedStatus string) {
var expectedStatus1 string
if expectedStatus == "Ready" {
expectedStatus1 = "True"
} else if expectedStatus == "NotReady" {
expectedStatus1 = "Unknown"
} else {
		err1 := fmt.Errorf("unsupported node status: %s", expectedStatus)
o.Expect(err1).NotTo(o.HaveOccurred())
}
err := wait.Poll(5*time.Second, 15*time.Minute, func() (bool, error) {
statusOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", nodeName, "-ojsonpath={.status.conditions[-1].status}").Output()
if err != nil {
e2e.Logf("\nGet node status with error : %v", err)
return false, nil
}
e2e.Logf("Expect Node %s in state %v, kubelet status is %s", nodeName, expectedStatus, statusOutput)
if statusOutput != expectedStatus1 {
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Node %s is not in expected status %s", nodeName, expectedStatus))
}
func getSingleWorkerNode(oc *exutil.CLI) string {
workerNodeName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "--selector=node-role.kubernetes.io/worker=", "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\nWorker Node Name is %v", workerNodeName)
return workerNodeName
}
func getSingleMasterNode(oc *exutil.CLI) string {
masterNodeName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "--selector=node-role.kubernetes.io/master=", "-o=jsonpath={.items[1].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\nMaster Node Name is %v", masterNodeName)
return masterNodeName
}
func getPodNodeName(oc *exutil.CLI, namespace string) string {
nodeName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[0].spec.nodeName}", "-n", namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Pod Node Name is %v \n", nodeName)
return nodeName
}
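// getPodNetNs parses the crio journal on the given host for the NetNS path of the pod-56266 test pod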
func getPodNetNs(oc *exutil.CLI, hostname string) (string, error) {
NetNsStr, err := exutil.DebugNodeWithChroot(oc, hostname, "/bin/bash", "-c", "journalctl -u crio --since=\"5 minutes ago\" | grep pod-56266 | grep NetNS")
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("NetNs string : %v", NetNsStr)
keyword := "NetNS:[^\\s]*"
re := regexp.MustCompile(keyword)
found := re.FindAllString(NetNsStr, -1)
if len(found) == 0 {
e2e.Logf("can not find NetNS for pod")
return "", fmt.Errorf("can not find NetNS for pod")
}
e2e.Logf("found : %v \n", found[0])
NetNs := strings.Split(found[0], ":")
e2e.Logf("NetNs : %v \n", NetNs[1])
return NetNs[1], nil
}
func addLabelToResource(oc *exutil.CLI, label string, resourceName string, resource string) {
_, err := oc.AsAdmin().WithoutNamespace().Run("label").Args(resource, resourceName, label, "--overwrite").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\nLabel Added")
}
func removeLabelFromNode(oc *exutil.CLI, label string, workerNodeName string, resource string) {
_, err := oc.AsAdmin().WithoutNamespace().Run("label").Args(resource, workerNodeName, label).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\nLabel Removed")
}
func rebootNode(oc *exutil.CLI, workerNodeName string) error {
return wait.Poll(1*time.Second, 1*time.Second, func() (bool, error) {
e2e.Logf("\nRebooting node %s....", workerNodeName)
_, err1 := exutil.DebugNodeWithChroot(oc, workerNodeName, "shutdown", "-r", "+1", "-t", "30")
o.Expect(err1).NotTo(o.HaveOccurred())
return true, nil
})
}
func masterNodeLog(oc *exutil.CLI, masterNode string) error {
return wait.Poll(1*time.Second, 1*time.Second, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args(`node/`+masterNode, "--", "chroot", "/host", "journalctl", "-u", "crio").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if !strings.Contains(status, "layer not known") {
e2e.Logf("\nTest successfully executed")
} else {
e2e.Logf("\nTest fail executed, and try next")
return false, nil
}
return true, nil
})
}
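// getmcpStatus waits until the named machine config pool is no longer updating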
func getmcpStatus(oc *exutil.CLI, nodeName string) error {
return wait.Poll(60*time.Second, 15*time.Minute, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", nodeName, "-ojsonpath={.status.conditions[?(@.type=='Updating')].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\nCurrent mcp UPDATING Status is %s\n", status)
if strings.Contains(status, "False") {
e2e.Logf("\nmcp updated successfully ")
} else {
e2e.Logf("\nmcp is still in UPDATING state")
return false, nil
}
return true, nil
})
}
func getWorkerNodeDescribe(oc *exutil.CLI, workerNodeName string) error {
return wait.Poll(3*time.Second, 1*time.Minute, func() (bool, error) {
nodeStatus, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args("node", workerNodeName).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(nodeStatus, "EvictionThresholdMet") {
e2e.Logf("\n WORKER NODE MET EVICTION THRESHOLD\n ")
} else {
			e2e.Logf("\n WORKER NODE DOES NOT HAVE MEMORY PRESSURE\n ")
return false, nil
}
return true, nil
})
}
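// checkOverlaySize verifies that the overlay size configured in /etc/containers/storage.conf on a worker node matches the expected value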
func checkOverlaySize(oc *exutil.CLI, overlaySize string) error {
return wait.Poll(3*time.Second, 1*time.Minute, func() (bool, error) {
workerNode := getSingleWorkerNode(oc)
//overlayString, err := exutil.DebugNodeWithChroot(oc, workerNode, "/bin/bash", "-c", "head -n 7 /etc/containers/storage.conf | grep size")
overlayString, err := exutil.DebugNodeWithChroot(oc, workerNode, "/bin/bash", "-c", "head -n 7 /etc/containers/storage.conf | grep size || true")
if err != nil {
return false, err
}
e2e.Logf("overlaySize string : %v", overlayString)
if strings.Contains(string(overlayString), overlaySize) {
			e2e.Logf("overlay size check passed")
} else {
e2e.Logf("overlay size check failed")
return false, nil
}
return true, nil
})
}
func checkPodOverlaySize(oc *exutil.CLI, overlaySize string) error {
return wait.Poll(1*time.Second, 3*time.Second, func() (bool, error) {
podName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[0].metadata.name}", "-n", oc.Namespace()).Output()
o.Expect(err).NotTo(o.HaveOccurred())
overlayString, err := oc.AsAdmin().WithoutNamespace().Run("rsh").Args("-n", oc.Namespace(), podName, "/bin/bash", "-c", "df -h | grep overlay").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("overlayString is : %v", overlayString)
overlaySizeStr := strings.Fields(string(overlayString))
e2e.Logf("overlaySize : %s", overlaySizeStr[1])
overlaySizeInt := strings.Split(string(overlaySizeStr[1]), ".")[0] + "G"
e2e.Logf("overlaySizeInt : %s", overlaySizeInt)
if overlaySizeInt == overlaySize {
e2e.Logf("pod overlay size is correct")
} else {
e2e.Logf("pod overlay size is not correct !!!")
return false, nil
}
return true, nil
})
}
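// checkNetNs verifies that the NetNS file at netNsPath has been removed from the host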
func checkNetNs(oc *exutil.CLI, hostname string, netNsPath string) error {
return wait.Poll(1*time.Second, 3*time.Second, func() (bool, error) {
result, _ := exutil.DebugNodeWithChroot(oc, hostname, "ls", "-l", netNsPath)
e2e.Logf("the check result: %v", result)
if strings.Contains(string(result), "No such file or directory") {
e2e.Logf("the NetNS file is cleaned successfully")
} else {
			e2e.Logf("the NetNS file still exists")
return false, nil
}
return true, nil
})
}
// this function checks whether cronjob events include an error like: MountVolume.SetUp failed for volume "serviceca" : object "openshift-image-registry"/"serviceca" not registered
func checkEventsForErr(oc *exutil.CLI) error {
return wait.Poll(1*time.Second, 3*time.Second, func() (bool, error) {
// get all cronjob's namespace from:
// NAMESPACE NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE
// openshift-image-registry image-pruner 0 0 * * * False 0 <none> 4h36m
// openshift-operator-lifecycle-manager collect-profiles */15 * * * * False 0 9m11s 4h40m
allcronjobs, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cronjob", "--all-namespaces", "-o=jsonpath={range .items[*]}{@.metadata.namespace}{\"\\n\"}{end}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("the cronjobs namespaces are: %v", allcronjobs)
for _, s := range strings.Fields(allcronjobs) {
events, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("events", "-n", s).Output()
o.Expect(err).NotTo(o.HaveOccurred())
keyword := "MountVolume.SetUp failed for volume.*object.*not registered"
re := regexp.MustCompile(keyword)
found := re.FindAllString(events, -1)
if len(found) > 0 {
e2e.Logf("The events of ns [%v] hit the error: %v", s, found[0])
return false, nil
}
}
		e2e.Logf("none of the cronjob events hit the error: MountVolume.SetUp failed for volume ... not registered")
return true, nil
})
}
func cleanupObjectsClusterScope(oc *exutil.CLI, objs ...objectTableRefcscope) error {
return wait.Poll(1*time.Second, 1*time.Second, func() (bool, error) {
for _, v := range objs {
e2e.Logf("\n Start to remove: %v", v)
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(v.kind, v.name).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(status, "Error") {
e2e.Logf("Error getting resources... Seems resources objects are already deleted. \n")
return true, nil
}
_, err = oc.AsAdmin().WithoutNamespace().Run("delete").Args(v.kind, v.name).Output()
o.Expect(err).NotTo(o.HaveOccurred())
}
return true, nil
})
}
func (podTwoContainers *podTwoContainersDescription) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", podTwoContainers.template, "-p", "NAME="+podTwoContainers.name, "NAMESPACE="+podTwoContainers.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (podTwoContainers *podTwoContainersDescription) delete(oc *exutil.CLI) error {
return oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", podTwoContainers.namespace, "pod", podTwoContainers.name).Execute()
}
func (podUserNS *podUserNSDescription) crioWorkloadConfigExist(oc *exutil.CLI) error {
return wait.Poll(10*time.Second, 30*time.Second, func() (bool, error) {
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
nodename := nodeList.Items[0].Name
workloadString, _ := exutil.DebugNodeWithChroot(oc, nodename, "cat", "/etc/crio/crio.conf.d/00-default")
		// err is not checked as a workaround for the issue that the debug container needs more time to start in 4.13 & 4.14
//o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(string(workloadString), "crio.runtime.workloads.openshift-builder") && strings.Contains(string(workloadString), "io.kubernetes.cri-o.userns-mode") && strings.Contains(string(workloadString), "io.kubernetes.cri-o.Devices") {
			e2e.Logf("the crio workload config exists in /etc/crio/crio.conf.d/00-default")
		} else {
			e2e.Logf("the crio workload config does not exist in /etc/crio/crio.conf.d/00-default")
return false, nil
}
return true, nil
})
}
func (podUserNS *podUserNSDescription) userContainersExistForNS(oc *exutil.CLI) error {
return wait.Poll(10*time.Second, 30*time.Second, func() (bool, error) {
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
nodename := nodeList.Items[0].Name
xContainers, _ := exutil.DebugNodeWithChroot(oc, nodename, "bash", "-c", "cat /etc/subuid /etc/subgid")
		// err is not checked as a workaround for the issue that the debug container needs more time to start in 4.13 & 4.14
//o.Expect(err).NotTo(o.HaveOccurred())
if strings.Count(xContainers, "containers") == 2 {
e2e.Logf("the user containers exist in /etc/subuid and /etc/subgid")
} else {
			e2e.Logf("the user containers do not exist in /etc/subuid and /etc/subgid")
return false, nil
}
return true, nil
})
}
func (podUserNS *podUserNSDescription) podRunInUserNS(oc *exutil.CLI) error {
return wait.Poll(10*time.Second, 30*time.Second, func() (bool, error) {
podName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[0].metadata.name}", "-n", podUserNS.namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
idString, err := oc.AsAdmin().WithoutNamespace().Run("rsh").Args("-n", podUserNS.namespace, podName, "id").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(string(idString), "uid=0(root) gid=0(root) groups=0(root)") {
e2e.Logf("the user id in pod is root")
podUserNSstr, _ := oc.AsAdmin().WithoutNamespace().Run("rsh").Args("-n", podUserNS.namespace, podName, "lsns", "-o", "NS", "-t", "user").Output()
			// err is not checked because the container may crash with an error like: unable to upgrade connection: container not found
//o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("string(podUserNS) is : %s", string(podUserNSstr))
podNS := strings.Fields(string(podUserNSstr))
e2e.Logf("pod user namespace : %s", podNS[1])
nodename, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[0].spec.nodeName}", "-n", podUserNS.namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeUserNS, _ := exutil.DebugNodeWithChroot(oc, string(nodename), "/bin/bash", "-c", "lsns -t user | grep /usr/lib/systemd/systemd")
			// err is not checked as a workaround for the issue that the debug container needs more time to start in 4.13 & 4.14
//o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("host user ns string : %v", nodeUserNS)
nodeNSstr := strings.Split(string(nodeUserNS), "\n")
nodeNS := strings.Fields(nodeNSstr[0])
e2e.Logf("host user namespace : %s", nodeNS[0])
if nodeNS[0] == podNS[1] {
				e2e.Logf("pod runs in the same user namespace as the host")
				return false, nil
			}
			e2e.Logf("pod runs in a different user namespace than the host")
return true, nil
}
e2e.Logf("the user id in pod is not root")
return false, nil
})
}
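// configExist verifies that every entry in config exists in the file at configPath on the first ready schedulable node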
func configExist(oc *exutil.CLI, config []string, configPath string) error {
return wait.Poll(1*time.Second, 3*time.Second, func() (bool, error) {
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
nodename := nodeList.Items[0].Name
configString, err := exutil.DebugNodeWithChroot(oc, nodename, "cat", configPath)
e2e.Logf("the %s is: \n%v", configPath, configString)
o.Expect(err).NotTo(o.HaveOccurred())
for _, conf := range config {
if !strings.Contains(string(configString), conf) {
e2e.Logf("the config: %s not exist in %s", conf, configPath)
return false, nil
}
}
e2e.Logf("all the config exist in %s", configPath)
return true, nil
})
}
func checkMachineConfigPoolStatus(oc *exutil.CLI, nodeSelector string) error {
	// when the master mcp changes cgroup from v2 to v1, it takes more than 15 minutes
return wait.Poll(30*time.Second, 30*time.Minute, func() (bool, error) {
mCount, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", nodeSelector, "-n", oc.Namespace(), "-o=jsonpath={.status.machineCount}").Output()
unmCount, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", nodeSelector, "-n", oc.Namespace(), "-o=jsonpath={.status.unavailableMachineCount}").Output()
dmCount, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", nodeSelector, "-n", oc.Namespace(), "-o=jsonpath={.status.degradedMachineCount}").Output()
rmCount, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", nodeSelector, "-n", oc.Namespace(), "-o=jsonpath={.status.readyMachineCount}").Output()
e2e.Logf("MachineCount:%v unavailableMachineCount:%v degradedMachineCount:%v ReadyMachineCount:%v", mCount, unmCount, dmCount, rmCount)
if strings.Compare(mCount, rmCount) == 0 && strings.Compare(unmCount, dmCount) == 0 {
return true, nil
}
return false, nil
})
}
// this func checks that the pod's cpu setting overrides the host default
func overrideWkloadCpu(oc *exutil.CLI, cpuset string, namespace string) error {
return wait.Poll(1*time.Second, 3*time.Second, func() (bool, error) {
podname, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[0].metadata.name}", "-n", namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
cpuSet, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args(string(podname), "-n", namespace, "--", "cat", "/sys/fs/cgroup/cpuset.cpus.effective").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The cpuset is : %s", cpuSet)
if cpuset == "" {
			// if cpuset == "", the pod keeps the default value from /sys/fs/cgroup/cpuset.cpus.effective on the node
nodename, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[0].spec.nodeName}", "-n", namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The nodename is %v", nodename)
cpusetDeft, err := exutil.DebugNodeWithChroot(oc, nodename, "cat", "/sys/fs/cgroup/cpuset.cpus.effective")
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The cpuset of host is : %s", cpusetDeft)
if strings.Contains(cpusetDeft, cpuSet) {
				e2e.Logf("the cpuset does not take effect in the pod")
return true, nil
}
} else if cpuSet == cpuset {
e2e.Logf("the pod override the default workload setting")
return true, nil
}
return false, nil
})
}
// this func checks that the pod's cpu setting stays the same as the host default
func defaultWkloadCpu(oc *exutil.CLI, cpuset string, namespace string) error {
return wait.Poll(1*time.Second, 3*time.Second, func() (bool, error) {
podname, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[0].metadata.name}", "-n", namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
cpuSet, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args(string(podname), "-n", namespace, "--", "cat", "/sys/fs/cgroup/cpuset.cpus.effective").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The cpuset of pod is : %s", cpuSet)
nodename, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[0].spec.nodeName}", "-n", namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The nodename is %v", nodename)
cpusetDeft, err := exutil.DebugNodeWithChroot(oc, nodename, "cat", "/sys/fs/cgroup/cpuset.cpus.effective")
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The cpuset of host is : %s", cpusetDeft)
if strings.Contains(cpusetDeft, cpuSet) {
if string(cpuSet) != cpuset {
e2e.Logf("the pod keep the default workload setting")
return true, nil
}
e2e.Logf("the pod specified value is the same as default, invalid test!")
return false, nil
}
return false, nil
})
}
// this function creates the CMA (Keda) operator
func createKedaOperator(oc *exutil.CLI) {
buildPruningBaseDir := exutil.FixturePath("testdata", "node")
operatorGroup := filepath.Join(buildPruningBaseDir, "operatorgroup.yaml")
subscription := filepath.Join(buildPruningBaseDir, "subscription.yaml")
nsOperator := filepath.Join(buildPruningBaseDir, "ns-keda-operator.yaml")
operatorNamespace := "openshift-keda"
msg, err := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", nsOperator).Output()
e2e.Logf("err %v, msg %v", err, msg)
msg, err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", operatorGroup).Output()
e2e.Logf("err %v, msg %v", err, msg)
msg, err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", subscription).Output()
e2e.Logf("err %v, msg %v", err, msg)
// checking subscription status
errCheck := wait.Poll(10*time.Second, 180*time.Second, func() (bool, error) {
subState, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", "openshift-custom-metrics-autoscaler-operator", "-n", operatorNamespace, "-o=jsonpath={.status.state}").Output()
//o.Expect(err).NotTo(o.HaveOccurred())
if strings.Compare(subState, "AtLatestKnown") == 0 {
msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", "openshift-custom-metrics-autoscaler-operator", "-n", operatorNamespace, "--no-headers").Output()
return true, nil
}
return false, nil
})
	exutil.AssertWaitPollNoErr(errCheck, "subscription openshift-custom-metrics-autoscaler-operator is not in the expected status")
// checking csv status
csvName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", "openshift-custom-metrics-autoscaler-operator", "-n", operatorNamespace, "-o=jsonpath={.status.installedCSV}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(csvName).NotTo(o.BeEmpty())
errCheck = wait.Poll(10*time.Second, 180*time.Second, func() (bool, error) {
csvState, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("csv", csvName, "-n", operatorNamespace, "-o=jsonpath={.status.phase}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Compare(csvState, "Succeeded") == 0 {
e2e.Logf("CSV check complete!!!")
return true, nil
}
return false, nil
})
	exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("csv %v is not in the expected status", csvName))
}
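// waitForPodWithLabelReady waits until all pods matching the label in the given namespace report the Ready condition as True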
func waitForPodWithLabelReady(oc *exutil.CLI, ns, label string) error {
return wait.Poll(5*time.Second, 3*time.Minute, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns, "-l", label, "-ojsonpath={.items[*].status.conditions[?(@.type==\"Ready\")].status}").Output()
e2e.Logf("the Ready status of pod is %v", status)
if err != nil || status == "" {
e2e.Logf("failed to get pod status: %v, retrying...", err)
return false, nil
}
if strings.Contains(status, "False") {
e2e.Logf("the pod Ready status not met; wanted True but got %v, retrying...", status)
return false, nil
}
return true, nil
})
}
// this function checks the kubelet log level (KUBELET_LOG_LEVEL)
func assertKubeletLogLevel(oc *exutil.CLI) {
var kubeservice string
var kublet string
var err error
waitErr := wait.Poll(10*time.Second, 1*time.Minute, func() (bool, error) {
nodeName, nodeErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(nodeErr).NotTo(o.HaveOccurred())
e2e.Logf("\nNode Names are %v", nodeName)
nodes := strings.Fields(nodeName)
for _, node := range nodes {
nodeStatus, statusErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", node, "-o=jsonpath={.status.conditions[?(@.type=='Ready')].status}").Output()
o.Expect(statusErr).NotTo(o.HaveOccurred())
e2e.Logf("\nNode %s Status is %s\n", node, nodeStatus)
if nodeStatus == "True" {
kubeservice, err = exutil.DebugNodeWithChroot(oc, node, "/bin/bash", "-c", "systemctl show kubelet.service | grep KUBELET_LOG_LEVEL")
o.Expect(err).NotTo(o.HaveOccurred())
kublet, err = exutil.DebugNodeWithChroot(oc, node, "/bin/bash", "-c", "ps aux | grep kubelet")
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(string(kubeservice), "KUBELET_LOG_LEVEL") && strings.Contains(string(kublet), "--v=2") {
e2e.Logf(" KUBELET_LOG_LEVEL is 2. \n")
return true, nil
} else {
e2e.Logf(" KUBELET_LOG_LEVEL is not 2. \n")
return false, nil
}
} else {
e2e.Logf("\n NODES ARE NOT READY\n ")
}
}
return false, nil
})
if waitErr != nil {
e2e.Logf("Kubelet Log level is:\n %v\n", kubeservice)
		e2e.Logf("Running kubelet processes are:\n %v\n", kublet)
}
exutil.AssertWaitPollNoErr(waitErr, "KUBELET_LOG_LEVEL is not expected")
}
// this function creates the VPA (Vertical Pod Autoscaler) operator
func createVpaOperator(oc *exutil.CLI) {
buildPruningBaseDir := exutil.FixturePath("testdata", "node")
operatorGroup := filepath.Join(buildPruningBaseDir, "vpa-operatorgroup.yaml")
subscription := filepath.Join(buildPruningBaseDir, "vpa-subscription.yaml")
nsOperator := filepath.Join(buildPruningBaseDir, "ns-vpa-operator.yaml")
operatorNamespace := "openshift-vertical-pod-autoscaler"
msg, err := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", nsOperator).Output()
e2e.Logf("err %v, msg %v", err, msg)
msg, err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", operatorGroup).Output()
e2e.Logf("err %v, msg %v", err, msg)
msg, err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", subscription).Output()
e2e.Logf("err %v, msg %v", err, msg)
// checking subscription status
errCheck := wait.Poll(10*time.Second, 180*time.Second, func() (bool, error) {
subState, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", "vertical-pod-autoscaler", "-n", operatorNamespace, "-o=jsonpath={.status.state}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Compare(subState, "AtLatestKnown") == 0 {
return true, nil
}
return false, nil
})
	exutil.AssertWaitPollNoErr(errCheck, "subscription vertical-pod-autoscaler is not in the expected status")
// checking csv status
csvName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", "vertical-pod-autoscaler", "-n", operatorNamespace, "-o=jsonpath={.status.installedCSV}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(csvName).NotTo(o.BeEmpty())
errCheck = wait.Poll(10*time.Second, 180*time.Second, func() (bool, error) {
csvState, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("csv", csvName, "-n", operatorNamespace, "-o=jsonpath={.status.phase}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Compare(csvState, "Succeeded") == 0 {
e2e.Logf("CSV check complete!!!")
return true, nil
}
return false, nil
})
	exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("csv %v is not in the expected status", csvName))
}
// this function verifies the runtimeRequestTimeout parameter applied via a KubeletConfig CR
func runTimeTimeout(oc *exutil.CLI) {
var kubeletConf string
var err error
nodeName, nodeErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "--selector=node-role.kubernetes.io/worker=", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(nodeErr).NotTo(o.HaveOccurred())
e2e.Logf("\nNode Names are %v", nodeName)
nodes := strings.Fields(nodeName)
waitErr := wait.Poll(10*time.Second, 1*time.Minute, func() (bool, error) {
for _, node := range nodes {
nodeStatus, statusErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", node, "-o=jsonpath={.status.conditions[?(@.type=='Ready')].status}").Output()
o.Expect(statusErr).NotTo(o.HaveOccurred())
e2e.Logf("\nNode %s Status is %s\n", node, nodeStatus)
if nodeStatus == "True" {
kubeletConf, err = exutil.DebugNodeWithChroot(oc, node, "/bin/bash", "-c", "cat /etc/kubernetes/kubelet.conf | grep runtimeRequestTimeout")
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(string(kubeletConf), "runtimeRequestTimeout") && strings.Contains(string(kubeletConf), ":") && strings.Contains(string(kubeletConf), "3m0s") {
e2e.Logf(" RunTime Request Timeout is 3 minutes. \n")
return true, nil
} else {
e2e.Logf("Runtime Request Timeout is not 3 minutes. \n")
return false, nil
}
} else {
e2e.Logf("\n NODES ARE NOT READY\n ")
}
}
return false, nil
})
if waitErr != nil {
e2e.Logf("RunTime Request Timeout is:\n %v\n", kubeletConf)
}
exutil.AssertWaitPollNoErr(waitErr, "Runtime Request Timeout is not expected")
}
func checkConmonForAllNode(oc *exutil.CLI) {
var configStr string
waitErr := wait.Poll(10*time.Second, 1*time.Minute, func() (bool, error) {
nodeName, nodeErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "--selector=node-role.kubernetes.io/worker=", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(nodeErr).NotTo(o.HaveOccurred())
e2e.Logf("\nNode Names are %v", nodeName)
nodes := strings.Fields(nodeName)
for _, node := range nodes {
nodeStatus, statusErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", node, "-o=jsonpath={.status.conditions[?(@.type=='Ready')].status}").Output()
o.Expect(statusErr).NotTo(o.HaveOccurred())
e2e.Logf("\nNode [%s] Status is %s\n", node, nodeStatus)
if nodeStatus == "True" {
// assign to the outer configStr (no ":=") so the failure log below can print it
var err error
configStr, err = exutil.DebugNodeWithChroot(oc, node, "/bin/bash", "-c", "crio config | grep 'conmon = \"\"'")
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(string(configStr), "conmon = \"\"") {
e2e.Logf(" conmon check pass. \n")
} else {
e2e.Logf(" conmon check failed. \n")
return false, nil
}
} else {
e2e.Logf("\n NODES ARE NOT READY\n ")
return false, nil
}
}
return true, nil
})
if waitErr != nil {
e2e.Logf("conmon string is:\n %v\n", configStr)
}
exutil.AssertWaitPollNoErr(waitErr, "the conmon is not as expected!")
}
// waitClusterOperatorAvailable waits for all the Cluster Operator resources to be
// in Available state. This generic function can be used either after draining a node
// or after an upgrade.
func waitClusterOperatorAvailable(oc *exutil.CLI) {
timeout := 120
waitErr := wait.Poll(10*time.Second, time.Duration(timeout)*time.Minute, func() (bool, error) {
availableCOStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperator", "-o=jsonpath={.items[*].status.conditions[?(@.type==\"Available\")].status}").Output()
if err != nil || strings.Contains(availableCOStatus, "False") {
e2e.Logf("Some Cluster Operator is still Unavailable")
return false, nil
}
return true, nil
})
exutil.AssertWaitPollNoErr(waitErr, fmt.Sprintf("Some cluster operator is still unavailable after %v seconds ...", timeout))
}
func checkUpgradeMachineConfig(oc *exutil.CLI) {
var machineconfig string
waitErr := wait.Poll(10*time.Second, 1*time.Minute, func() (bool, error) {
upgradestatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co", "machine-config").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\n Upgrade status is %s\n", upgradestatus)
// assign to the outer machineconfig (no ":=") so the failure log below can print it
var err1 error
machineconfig, err1 = oc.AsAdmin().WithoutNamespace().Run("get").Args("mc").Output()
o.Expect(err1).NotTo(o.HaveOccurred())
if strings.Contains(string(machineconfig), "99-worker-generated-kubelet-1") {
re := regexp.MustCompile("99-worker-generated-kubelet")
found := re.FindAllString(machineconfig, -1)
lenstr := len(found)
if lenstr == 2 {
e2e.Logf("\n Upgrade happened successfully")
return true, nil
} else {
e2e.Logf("\nError: expected 2 99-worker-generated-kubelet machine configs, found %d", lenstr)
return false, nil
}
} else {
e2e.Logf(" Upgrade has failed \n")
return false, nil
}
})
if waitErr != nil {
e2e.Logf("machine config is %s\n", machineconfig)
}
exutil.AssertWaitPollNoErr(waitErr, "the machine config is not as expected.")
}
func ProbeTerminatePeriod(oc *exutil.CLI, terminatePeriod int, probeterminatePeriod int, podName string, namespace string, flag bool) {
var terminate = 0
if flag == true {
terminate = probeterminatePeriod
} else {
terminate = terminatePeriod
}
e2e.Logf("terminate is: %v", terminate)
waitErr := wait.Poll(10*time.Second, 4*time.Minute, func() (bool, error) {
podDesc, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args("pod", podName, "-n", namespace).OutputToFile("podDesc.txt")
o.Expect(err).NotTo(o.HaveOccurred())
probeFailT, _ := exec.Command("bash", "-c", "cat "+podDesc+" | grep \"Container.*failed.*probe, will be restarted\"").Output()
conStartT, _ := exec.Command("bash", "-c", "cat "+podDesc+" | grep \"Started container test\" ").Output()
e2e.Logf("probeFailT is: %v", string(probeFailT))
e2e.Logf("conStartT is: %v", string(conStartT))
if string(probeFailT) != "" && string(conStartT) != "" {
// check that the age difference (probeFailT - conStartT) falls within [terminate-3, terminate+3] seconds
var time1 = strings.Fields(string(probeFailT))[2]
var time2 = strings.Fields(string(conStartT))[2]
var time1Min string
var timeTemp string
var time1Sec string
var time1MinInt int
var time1SecInt int
if strings.Contains(time1, "m") {
time1Min = strings.Split(time1, "m")[0]
timeTemp = strings.Split(time1, "m")[1]
time1MinInt, err = strconv.Atoi(time1Min)
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(time1, "s") {
time1Sec = strings.Split(timeTemp, "s")[0]
time1SecInt, err = strconv.Atoi(time1Sec)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
time1Sec = "0"
time1SecInt = 0
}
} else {
time1Sec = strings.Split(time1, "s")[0]
time1SecInt, err = strconv.Atoi(time1Sec)
o.Expect(err).NotTo(o.HaveOccurred())
time1MinInt = 0
}
e2e.Logf("time1Min:%v, timeTemp:%v, time1Sec:%v, time1MinInt:%v, time1SecInt:%v", time1Min, timeTemp, time1Sec, time1MinInt, time1SecInt)
timeSec1 := time1MinInt*60 + time1SecInt
e2e.Logf("timeSec1: %v ", timeSec1)
var time2Min string
var time2Sec string
var time2MinInt int
var time2SecInt int
if strings.Contains(time2, "m") {
time2Min = strings.Split(time2, "m")[0]
timeTemp = strings.Split(time2, "m")[1]
time2MinInt, err = strconv.Atoi(time2Min)
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(time2, "s") {
time2Sec = strings.Split(timeTemp, "s")[0]
time2SecInt, err = strconv.Atoi(time2Sec)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
time2Sec = "0"
time2SecInt = 0
}
} else {
time2Sec = strings.Split(time2, "s")[0]
time2SecInt, err = strconv.Atoi(time2Sec)
o.Expect(err).NotTo(o.HaveOccurred())
time2MinInt = 0
}
e2e.Logf("time2Min:%v, time2Sec:%v, time2MinInt:%v, time2SecInt:%v", time2Min, time2Sec, time2MinInt, time2SecInt)
timeSec2 := time2MinInt*60 + time2SecInt
e2e.Logf("timeSec2: %v ", timeSec2)
if ((timeSec1 - timeSec2) >= (terminate - 3)) && ((timeSec1 - timeSec2) <= (terminate + 3)) {
e2e.Logf("terminationGracePeriod check pass")
return true, nil
} else {
e2e.Logf("terminationGracePeriod check failed")
return false, nil
}
} else {
e2e.Logf("not capture data")
return false, nil
}
})
exutil.AssertWaitPollNoErr(waitErr, "probe terminationGracePeriod is not as expected!")
}
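// The minutes/seconds parsing above can also be expressed with time.ParseDuration, which
// understands the same age format printed by `oc describe` events (e.g. "4m30s", "45s").
// This is only an illustrative sketch of that idea; parseEventAgeSeconds is a hypothetical
// helper and is not used by the test cases in this file.
func parseEventAgeSeconds(age string) (int, error) {
// time.ParseDuration handles "45s", "4m" and "4m30s" directly
d, err := time.ParseDuration(age)
if err != nil {
return 0, err
}
return int(d.Seconds()), nil
}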
// this function installs and verifies the Cluster Resource Override Admission Webhook operator
func installOperatorClusterresourceoverride(oc *exutil.CLI) {
buildPruningBaseDir := exutil.FixturePath("testdata", "node")
nsclusterresourceoperatorTemp := filepath.Join(buildPruningBaseDir, "ns-clusterresource-operator.yaml")
croperatorgroupTemp := filepath.Join(buildPruningBaseDir, "cr-operatorgroup.yaml")
crsubscriptionTemp := filepath.Join(buildPruningBaseDir, "cr-subscription.yaml")
operatorNamespace := "clusterresourceoverride-operator"
ns, err := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", nsclusterresourceoperatorTemp).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("/n Namespace status is %v", ns)
og, err1 := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", croperatorgroupTemp).Output()
o.Expect(err1).NotTo(o.HaveOccurred())
e2e.Logf("/n Operator group status is %v", og)
subscrip, err2 := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", crsubscriptionTemp).Output()
o.Expect(err2).NotTo(o.HaveOccurred())
e2e.Logf("/n Subscription status is %v", subscrip)
errCheck := wait.Poll(10*time.Second, 180*time.Second, func() (bool, error) {
subscription, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", "clusterresourceoverride", "-n", operatorNamespace, "-o=jsonpath={.status.state}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Compare(subscription, "AtLatestKnown") == 0 {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("subscription clusterresourceoverride is not in correct status"))
csvName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", "clusterresourceoverride", "-n", operatorNamespace, "-o=jsonpath={.status.installedCSV}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(csvName).NotTo(o.BeEmpty())
errCheck = wait.Poll(10*time.Second, 180*time.Second, func() (bool, error) {
csvState, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("csv", csvName, "-n", operatorNamespace, "-o=jsonpath={.status.phase}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Compare(csvState, "Succeeded") == 0 {
e2e.Logf("CSV check complete!!!")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("csv %v is not correct status", csvName))
}
func createCRClusterresourceoverride(oc *exutil.CLI) {
buildPruningBaseDir := exutil.FixturePath("testdata", "node")
clusterresourceoverrideTemp := filepath.Join(buildPruningBaseDir, "clusterresource-override.yaml")
cro, err3 := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", clusterresourceoverrideTemp).Output()
o.Expect(err3).NotTo(o.HaveOccurred())
e2e.Logf("/n Cluster Resource Overrride status is %v", cro)
}
func deleteAPIService(oc *exutil.CLI) {
e2e.Logf("Deleting apiservice v1.admission.autoscaling.openshift.io to unblock other test cases")
_, err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("apiservice", "v1.admission.autoscaling.openshift.io").Output()
if err != nil {
e2e.Logf("Failed to delete apiservice: %v", err)
} else {
e2e.Logf("Successfully deleted apiservice v1.admission.autoscaling.openshift.io")
}
}
// this function tests config changes to the Cluster Resource Override Webhook
func testCRClusterresourceoverride(oc *exutil.CLI) {
patch := `[{"op": "replace", "path": "/spec/podResourceOverride/spec/cpuRequestToLimitPercent", "value":40},{"op": "replace", "path": "/spec/podResourceOverride/spec/limitCPUToMemoryPercent", "value":90},{"op": "replace", "path": "/spec/podResourceOverride/spec/memoryRequestToLimitPercent", "value":50}]`
test, err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("clusterresourceoverride.operator.autoscaling.openshift.io", "cluster", "--type=json", "-p", patch).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\n Parameters edited %v", test)
o.Expect(strings.Contains(test, "clusterresourceoverride.operator.autoscaling.openshift.io/cluster patched")).To(o.BeTrue())
cpuRequestToLimitPercent, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("ClusterResourceOverride", "cluster", "-o=jsonpath={.spec.podResourceOverride.spec.cpuRequestToLimitPercent}").Output()
limitCPUToMemoryPercent, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("ClusterResourceOverride", "cluster", "-o=jsonpath={.spec.podResourceOverride.spec.limitCPUToMemoryPercent}").Output()
memoryRequestToLimitPercent, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("ClusterResourceOverride", "cluster", "-o=jsonpath={.spec.podResourceOverride.spec.memoryRequestToLimitPercent}").Output()
if cpuRequestToLimitPercent == "40" && limitCPUToMemoryPercent == "90" && memoryRequestToLimitPercent == "50" {
e2e.Logf("Successfully updated the file")
} else {
e2e.Failf("Cluster resource overrides not updated successfully")
}
}
func checkICSP(oc *exutil.CLI) bool {
icsp, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ImageContentSourcePolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(icsp, "No resources found") {
e2e.Logf("there is no ImageContentSourcePolicy in this cluster")
return false
}
return true
}
func checkIDMS(oc *exutil.CLI) bool {
icsp, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ImageDigestMirrorSet").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(icsp, "No resources found") {
e2e.Logf("there is no ImageDigestMirrorSet in this cluster")
return false
}
return true
}
func checkICSPorIDMSorITMS(oc *exutil.CLI) bool {
icsp, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ImageContentSourcePolicy").Output()
o.Expect(err).NotTo(o.HaveOccurred())
idms, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ImageDigestMirrorSet").Output()
o.Expect(err).NotTo(o.HaveOccurred())
itms, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ImageTagMirrorSet").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(icsp, "No resources found") && strings.Contains(idms, "No resources found") && strings.Contains(itms, "No resources found") {
e2e.Logf("there is no ImageContentSourcePolicy, ImageDigestMirrorSet and ImageTagMirrorSet in this cluster")
return false
}
return true
}
func checkRegistryForIdms(oc *exutil.CLI) error {
return wait.Poll(10*time.Second, 30*time.Second, func() (bool, error) {
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
nodename := nodeList.Items[0].Name
registry, _ := exutil.DebugNodeWithChroot(oc, nodename, "cat", "/etc/containers/registries.conf")
// err is not handled as a workaround: the debug container needs more time to start in 4.13 & 4.14
//o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(string(registry), "registry.access.redhat.com/ubi9/ubi-minimal") && strings.Contains(string(registry), "example.io/example/ubi-minimal") && strings.Contains(string(registry), "example.com/example/ubi-minimal") && strings.Contains(string(registry), "pull-from-mirror = \"digest-only\"") && strings.Contains(string(registry), "location = \"registry.example.com/example\"") && strings.Contains(string(registry), "blocked = true") {
e2e.Logf("ImageDigestMirrorSet apply successfully!")
} else {
e2e.Logf("ImageDigestMirrorSet apply failed!")
return false, nil
}
return true, nil
})
}
func checkImgSignature(oc *exutil.CLI) error {
return wait.Poll(10*time.Second, 30*time.Second, func() (bool, error) {
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
nodename := nodeList.Items[0].Name
imgSig, _ := exutil.DebugNodeWithChroot(oc, nodename, "cat", "/etc/containers/policy.json")
// err is not handled as a workaround: the debug container needs more time to start in 4.13 & 4.14
//o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(string(imgSig), "registry.access.redhat.com") && strings.Contains(string(imgSig), "signedBy") && strings.Contains(string(imgSig), "GPGKeys") && strings.Contains(string(imgSig), "/etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release") && strings.Contains(string(imgSig), "registry.redhat.io") {
e2e.Logf("Image signature verified pass!")
return true, nil
}
e2e.Logf("Image signature verified failed!")
return false, nil
})
}
func checkCrun(oc *exutil.CLI) {
var crunProc string
var libcrun string
nodeList, err := e2enode.GetReadySchedulableNodes(context.TODO(), oc.KubeFramework().ClientSet)
o.Expect(err).NotTo(o.HaveOccurred())
nodename := nodeList.Items[0].Name
waitErr := wait.Poll(10*time.Second, 30*time.Second, func() (bool, error) {
crunProc, _ = exutil.DebugNodeWithChroot(oc, nodename, "bash", "-c", "ps -aux | grep crun")
libcrun, _ = exutil.DebugNodeWithChroot(oc, nodename, "bash", "-c", "systemctl status crio-$(sudo crictl ps -aq | head -n1).scope")
if strings.Contains(string(crunProc), "root=/run/crun") && strings.Contains(string(libcrun), "libcrun") {
e2e.Logf("crun is running!")
return true, nil
}
e2e.Logf("crun is not running!")
return false, nil
})
if waitErr != nil {
e2e.Logf("crunProc is :\n%s\n", crunProc)
e2e.Logf("libcrun is :\n%s\n", libcrun)
}
exutil.AssertWaitPollNoErr(waitErr, "crun check failed!")
}
// this function is for the upgrade test, to check that the SYSTEM_RESERVED_ES parameter is not empty
func parameterCheck(oc *exutil.CLI) {
nodeName, nodeErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "--selector=node-role.kubernetes.io/worker=", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(nodeErr).NotTo(o.HaveOccurred())
e2e.Logf("\nNode Names are %v", nodeName)
nodes := strings.Fields(nodeName)
waitErr := wait.Poll(10*time.Second, 1*time.Minute, func() (bool, error) {
for _, node := range nodes {
nodeStatus, statusErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", node, "-o=jsonpath={.status.conditions[?(@.type=='Ready')].status}").Output()
o.Expect(statusErr).NotTo(o.HaveOccurred())
e2e.Logf("\nNode %s Status is %s\n", node, nodeStatus)
if nodeStatus == "True" {
sysreservedes, _ := exutil.DebugNodeWithChroot(oc, node, "/bin/bash", "-c", "cat /etc/node-sizing.env")
if strings.Contains(sysreservedes, "SYSTEM_RESERVED_ES=1Gi") {
e2e.Logf("SYSTEM_RESERVED_ES default value is set. \n")
} else {
e2e.Logf("SYSTEM_RESERVED_ES default value has not been set. \n")
return false, nil
}
} else {
e2e.Logf("\n NODES ARE NOT READY\n")
return false, nil
}
}
return true, nil
})
if waitErr != nil {
e2e.Logf("SYSTEM_RESERVED_ES default value check failed")
}
exutil.AssertWaitPollNoErr(waitErr, "SYSTEM_RESERVED_ES is not set to the default value")
}
func checkLogLink(oc *exutil.CLI, namespace string) {
waitErr := wait.Poll(10*time.Second, 30*time.Second, func() (bool, error) {
podname, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[0].metadata.name}", "-n", namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
log1, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args(string(podname), "-n", namespace, "--", "cat", "/acme-logs/logs/httpd/0.log").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(log1, "httpd -D FOREGROUND") {
e2e.Logf("log link successfully")
} else {
e2e.Logf("log link failed!")
return false, nil
}
nodename, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[0].spec.nodeName}", "-n", namespace).Output()
e2e.Logf("The nodename is %v", nodename)
o.Expect(err).NotTo(o.HaveOccurred())
nodeReadyBool, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", fmt.Sprintf("%s", nodename), "-o=jsonpath={.status.conditions[?(@.reason=='KubeletReady')].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
podIP, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[0].status.podIP}", "-n", namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if nodeReadyBool == "True" {
output, err := exutil.DebugNodeWithChroot(oc, nodename, "curl", podIP)
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(output, "It works!") {
e2e.Logf("curl successfully")
} else {
e2e.Logf("curl failed!")
return false, nil
}
} else {
e2e.Logf("NODES ARE NOT READY!")
}
log2, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args(string(podname), "-n", namespace, "--", "cat", "/acme-logs/logs/httpd/0.log").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(log2, "\"GET / HTTP/1.1\" 200 45") {
e2e.Logf("log link update successfully")
return true, nil
} else {
e2e.Logf("log link update failed!")
return false, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(waitErr, "check log link failed!")
}
// this function checks that CPU quota is disabled, from the container scope and the pod cgroup
func checkCpuQuotaDisabled(oc *exutil.CLI, namespace string, podName string, cgroupV string) {
waitErr := wait.Poll(5*time.Second, 1*time.Minute, func() (bool, error) {
if cgroupV == "tmpfs" {
e2e.Logf("the cgroup version is v1, not support in 4.16+")
} else if cgroupV == "cgroup2fs" { // it's for cgroup v2
out1, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", namespace, podName, "--", "/bin/sh", "-c", "cat /sys/fs/cgroup/cpu.stat | grep nr_throttled").Output()
if err != nil {
e2e.Logf("failed to check /sys/fs/cgroup/cpu.stat, error: %s ", err)
return false, nil
}
o.Expect(strings.Contains(string(out1), "nr_throttled 0")).Should(o.BeTrue())
out2, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", namespace, podName, "--", "/bin/sh", "-c", "cat /sys/fs/cgroup/cpu.max").Output()
if err != nil {
e2e.Logf("failed to check /sys/fs/cgroup/cpu.max, error: %s ", err)
return false, nil
}
o.Expect(strings.Contains(string(out2), "max 100000")).Should(o.BeTrue())
return true, nil
} else {
e2e.Logf("the cgroup version [%s] is valid", cgroupV)
}
return false, nil
})
exutil.AssertWaitPollNoErr(waitErr, "check Cpu Quota Disabled failed!")
}
// this function checks that CPU load balancing is disabled, based on the pod's host dmesg log
func checkCpuLoadBalanceDisabled(oc *exutil.CLI, namespace string, podName string) {
waitErr := wait.Poll(5*time.Second, 1*time.Minute, func() (bool, error) {
nodename, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", podName, "-o=jsonpath={.spec.nodeName}", "-n", namespace).Output()
if err != nil {
e2e.Logf("failed to get the pod's node name, error: %s ", err)
return false, nil
}
out, err := exutil.DebugNodeWithChroot(oc, nodename, "/bin/bash", "-c", "dmesg | grep 'CPUs do not have asymmetric capacities'")
if err != nil {
e2e.Logf("failed to check CPUs asymmetric capacities, error: %s ", err)
return false, nil
}
//For CPU, we set reserved: 1-4, isolated: 0,5-7;
//If cpu 0 is load balance disabled, the log show [rd 1-7: CPUs do not have asymmetric capacities]
//If cpu 0 and cpu 5 are load balance disabled, the log show [rd 1-4,6-7: CPUs do not have asymmetric capacities]
//As long as any cpu is load balance disabled, the log won't be [rd 0-7: CPUs do not have asymmetric capacities]
//If the pod doesn't include annotation "cpu-load-balancing.crio.io: "disable"", the log won't appear [CPUs do not have asymmetric capacities]
o.Expect(strings.Contains(string(out), "CPUs do not have asymmetric capacities")).Should(o.BeTrue())
o.Expect(out).ShouldNot(o.ContainSubstring("rd 0-7: CPUs do not have asymmetric capacities"))
return true, nil
})
exutil.AssertWaitPollNoErr(waitErr, "check Cpu Quota Disabled failed!")
}
// this function toggles cpu schedule info in the dmesg log, flag="0" : turn on / flag="1" : turn off
func dmesgTurnOnCpu(oc *exutil.CLI, flag string) {
nodeName, nodeErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "--selector=node-role.kubernetes.io/worker=", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(nodeErr).NotTo(o.HaveOccurred())
e2e.Logf("\nNode Names are %v", nodeName)
nodeList := strings.Fields(nodeName)
waitErr := wait.Poll(10*time.Second, 1*time.Minute, func() (bool, error) {
for _, node := range nodeList {
nodeStatus, statusErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", node, "-o=jsonpath={.status.conditions[?(@.type=='Ready')].status}").Output()
if statusErr != nil {
e2e.Logf("failed to get node status, error: %s ", statusErr)
return false, nil
}
e2e.Logf("\nNode %s Status is %s\n", node, nodeStatus)
if nodeStatus != "True" {
e2e.Logf("\n NODES ARE NOT READY\n")
return false, nil
}
switch flag {
case "0":
_, err := exutil.DebugNodeWithChroot(oc, node, "/bin/bash", "-c", "echo Y > /sys/kernel/debug/sched/verbose")
if err != nil {
e2e.Logf("\n failed to set Y to CPU, error: %v ", err)
return false, nil
}
case "1":
_, err := exutil.DebugNodeWithChroot(oc, node, "/bin/bash", "-c", "echo N > /sys/kernel/debug/sched/verbose")
if err != nil {
e2e.Logf("\n failed to set N to CPU, error: %v ", err)
return false, nil
}
default:
e2e.Logf("\n switch flag [%s] is invalid", flag)
return false, nil
}
}
return true, nil
})
exutil.AssertWaitPollNoErr(waitErr, "dmesg set cpu log failed!")
}
// this function creates a KedaController from template for CMA
func (cmaKedaController *cmaKedaControllerDescription) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", cmaKedaController.template, "-p", "LEVEL="+cmaKedaController.level, "NAMESPACE="+cmaKedaController.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
waitForDeploymentPodsToBeReady(oc, "openshift-keda", "keda-metrics-apiserver")
}
// this function deletes the KedaController for CMA
func (cmaKedaController *cmaKedaControllerDescription) delete(oc *exutil.CLI) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", cmaKedaController.namespace, "KedaController", cmaKedaController.name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
func (pvcKedaController *pvcKedaControllerDescription) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", pvcKedaController.template, "-p", "LEVEL="+pvcKedaController.level, "NAMESPACE="+pvcKedaController.namespace, "WATCHNAMESPACE="+pvcKedaController.watchNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
waitForDeploymentPodsToBeReady(oc, "openshift-keda", "keda-metrics-apiserver")
}
func (pvcKedaController *pvcKedaControllerDescription) delete(oc *exutil.CLI) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", pvcKedaController.namespace, "KedaController", pvcKedaController.name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
func waitPodReady(oc *exutil.CLI, ns string, label string) {
podNameList := getPodNameByLabel(oc, ns, label)
exutil.AssertPodToBeReady(oc, podNameList[0], ns)
}
func getPodNameByLabel(oc *exutil.CLI, namespace string, label string) []string {
var podName []string
podNameAll, err := oc.AsAdmin().Run("get").Args("-n", namespace, "pod", "-l", label, "-ojsonpath={.items..metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
podName = strings.Split(podNameAll, " ")
e2e.Logf("The pod(s) are %v ", podName)
return podName
}
// waitForDeploymentPodsToBeReady waits for the specified deployment to be ready
func waitForDeploymentPodsToBeReady(oc *exutil.CLI, namespace string, name string) {
var selectors map[string]string
err := wait.Poll(5*time.Second, 180*time.Second, func() (done bool, err error) {
deployment, err := oc.AdminKubeClient().AppsV1().Deployments(namespace).Get(context.Background(), name, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
e2e.Logf("Waiting for availability of deployment/%s\n", name)
return false, nil
}
return false, err
}
selectors = deployment.Spec.Selector.MatchLabels
if deployment.Status.AvailableReplicas == *deployment.Spec.Replicas && deployment.Status.UpdatedReplicas == *deployment.Spec.Replicas {
e2e.Logf("Deployment %s available (%d/%d)\n", name, deployment.Status.AvailableReplicas, *deployment.Spec.Replicas)
return true, nil
}
e2e.Logf("Waiting for full availability of %s deployment (%d/%d)\n", name, deployment.Status.AvailableReplicas, *deployment.Spec.Replicas)
return false, nil
})
if err != nil && len(selectors) > 0 {
var labels []string
for k, v := range selectors {
labels = append(labels, k+"="+v)
}
label := strings.Join(labels, ",")
podStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, "-l", label, "-ojsonpath={.items[].status.conditions}").Output()
containerStatus, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, "-l", label, "-ojsonpath={.items[].status.containerStatuses}").Output()
e2e.Failf("deployment %s is not ready:\nconditions: %s\ncontainer status: %s", name, podStatus, containerStatus)
}
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("deployment %s is not available", name))
}
// Poll to wait for kafka to be ready
func waitForKafkaReady(oc *exutil.CLI, kafkaName string, kafkaNS string) {
err := wait.Poll(3*time.Second, 180*time.Second, func() (done bool, err error) {
command := []string{"kafka.kafka.strimzi.io", kafkaName, "-n", kafkaNS, `-o=jsonpath={.status.conditions[*].type}`}
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(command...).Output()
if err != nil {
e2e.Logf("kafka status ready error: %v", err)
return false, err
}
if output == "Ready" || output == "Warning Ready" || output == "Warning Warning Warning Warning Ready" {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("resource kafka/%s did not appear", kafkaName))
}
// Poll to wait for kafka Topic to be ready
func waitForKafkaTopicReady(oc *exutil.CLI, kafkaTopicName string, kafkaTopicNS string) {
err := wait.Poll(3*time.Second, 180*time.Second, func() (done bool, err error) {
command := []string{"kafkaTopic", kafkaTopicName, "-n", kafkaTopicNS, `-o=jsonpath='{.status.conditions[*].type}'`}
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(command...).Output()
if err != nil {
e2e.Logf("kafka Topic status ready error: %v", err)
return false, err
}
status := strings.Replace(output, "'", "", 2)
e2e.Logf("Waiting for kafka status %s", status)
if status == "Ready" {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("resource kafkaTopic/%s did not appear", kafkaTopicName))
}
// this function uninstalls the AMQ operator
func removeAmqOperator(oc *exutil.CLI) {
operatorNamespace := "kafka-52384"
msg, err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", operatorNamespace, "sub", "amq-streams").Output()
if err != nil {
e2e.Logf("%v", msg)
}
_ = oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", operatorNamespace, "csv", "-l", "operators.coreos.com/amq-streams.openshift-operators").Execute()
}
// this function creates the AMQ operator
func createAmqOperator(oc *exutil.CLI) {
buildPruningBaseDir := exutil.FixturePath("testdata", "node")
subscription := filepath.Join(buildPruningBaseDir, "amq-sub.yaml")
operatorNamespace := "kafka-52384"
operatorGroupFile := filepath.Join(buildPruningBaseDir, "amq-operatorgroup-52384.yaml")
msg, err := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", operatorGroupFile).Output()
e2e.Logf("err %v, msg %v", err, msg)
msg, err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", subscription).Output()
e2e.Logf("err %v, msg %v", err, msg)
// checking subscription status
errCheck := wait.Poll(10*time.Second, 180*time.Second, func() (bool, error) {
subState, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", "amq-streams", "-n", operatorNamespace, "-o=jsonpath={.status.state}").Output()
//o.Expect(err).NotTo(o.HaveOccurred())
if strings.Compare(subState, "AtLatestKnown") == 0 {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, "subscription amq-streams is not correct status")
// checking csv status
csvName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", "amq-streams", "-n", operatorNamespace, "-o=jsonpath={.status.installedCSV}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(csvName).NotTo(o.BeEmpty())
errCheck = wait.Poll(10*time.Second, 180*time.Second, func() (bool, error) {
csvState, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("csv", csvName, "-n", operatorNamespace, "-o=jsonpath={.status.phase}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Compare(csvState, "Succeeded") == 0 {
e2e.Logf("CSV check complete!!!")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, "subscription amq-streams is not correct status")
}
func createProject(oc *exutil.CLI, namespace string) {
oc.CreateSpecifiedNamespaceAsAdmin(namespace)
/* turn off the automatic label synchronization required for PodSecurity admission
set pods security profile to privileged. See
https://kubernetes.io/docs/concepts/security/pod-security-admission/#pod-security-levels */
err := exutil.SetNamespacePrivileged(oc, namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}
// this function deletes a namespace; we intend to do it after each test case run
func deleteProject(oc *exutil.CLI, namespace string) {
oc.DeleteSpecifiedNamespaceAsAdmin(namespace)
}
func (triggerAuthentication *triggerAuthenticationDescription) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", triggerAuthentication.template, "-p", "SECRET_NAME="+triggerAuthentication.secretname, "NAMESPACE="+triggerAuthentication.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}
// this function is for [MCO] changing the container registry config and verifying it is applied as expected
func checkImageConfigUpdatedAsExpected(oc *exutil.CLI) {
buildPruningBaseDir := exutil.FixturePath("testdata", "node")
ImageconfigContTemp := filepath.Join(buildPruningBaseDir, "image-config.json")
currentResourceVersion, getRvErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("image.config", "cluster", "-ojsonpath={.metadata.resourceVersion}").Output()
o.Expect(getRvErr).NotTo(o.HaveOccurred())
if currentResourceVersion != "" {
testImageConfigJSONByte, readFileErr := ioutil.ReadFile(ImageconfigContTemp)
o.Expect(readFileErr).NotTo(o.HaveOccurred())
testImageConfigJSON, err := sjson.Set(string(testImageConfigJSONByte), `metadata.resourceVersion`, currentResourceVersion)
o.Expect(err).NotTo(o.HaveOccurred())
path := filepath.Join(e2e.TestContext.OutputDir, "new-imageConfig"+"-"+getRandomString()+".json")
o.Expect(ioutil.WriteFile(path, pretty.Pretty([]byte(testImageConfigJSON)), 0644)).NotTo(o.HaveOccurred())
e2e.Logf("The new ImageConfig is %s", path)
ImageconfigContTemp = path
}
imgfile, err := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", ImageconfigContTemp).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\n Image File status is %v", imgfile)
//for checking machine config
waitErr0 := wait.Poll(30*time.Second, 1*time.Minute, func() (bool, error) {
mc, err1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("mc", "--sort-by=metadata.creationTimestamp").Output()
o.Expect(err1).NotTo(o.HaveOccurred())
e2e.Logf("\n Machine configs are:\n %s", mc)
oc.NotShowInfo()
if strings.Contains(string(mc), "rendered") {
e2e.Logf(" New render configs are generated. \n")
return true, nil
}
e2e.Logf(" New render configs are not generated. \n")
return false, nil
})
exutil.AssertWaitPollNoErr(waitErr0, "New Renders are not expected")
//waiting for mcp to get updated
exutil.By("Check mcp finish rolling out")
oc.NotShowInfo()
mcpName := "worker"
mcpName2 := "master"
err3 := checkMachineConfigPoolStatus(oc, mcpName)
exutil.AssertWaitPollNoErr(err3, "macineconfigpool worker update failed")
err4 := checkMachineConfigPoolStatus(oc, mcpName2)
exutil.AssertWaitPollNoErr(err4, "macineconfigpool master update failed")
//for checking machine config pool
mcp, err2 := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp").Output()
o.Expect(err2).NotTo(o.HaveOccurred())
e2e.Logf("\n Machine config pools are:\n %s", mcp)
nodeName, nodeErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(nodeErr).NotTo(o.HaveOccurred())
e2e.Logf("\nNode Names are %v", nodeName)
nodes := strings.Fields(nodeName)
waitErr := wait.Poll(10*time.Second, 1*time.Minute, func() (bool, error) {
nodeStatus, statusErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", nodes[0], "-o=jsonpath={.status.conditions[?(@.type=='Ready')].status}").Output()
o.Expect(statusErr).NotTo(o.HaveOccurred())
e2e.Logf("\nNode %s Status is %s\n", nodes[0], nodeStatus)
if nodeStatus == "True" {
registrieslist, err4 := exutil.DebugNodeWithChroot(oc, nodes[0], "cat", "/etc/containers/registries.conf.d/01-image-searchRegistries.conf")
o.Expect(err4).NotTo(o.HaveOccurred())
e2e.Logf("\nImage Registry list is %v", registrieslist)
o.Expect(strings.TrimSpace(registrieslist)).NotTo(o.BeEmpty())
if strings.Contains((registrieslist), "qe.quay.io") {
e2e.Logf(" Configuration has been changed successfully. \n")
return true, nil
}
e2e.Logf(" Changes has not been made. \n")
return false, nil
}
e2e.Logf("\n NODES ARE NOT READY\n ")
return false, nil
})
exutil.AssertWaitPollNoErr(waitErr, "Registry List is not expected")
}
func createImageConfigWIthExportJSON(oc *exutil.CLI, originImageConfigJSON string) {
var (
err error
finalJSONContent string
)
currentResourceVersion, getRvErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("image.config", "cluster", "-ojsonpath={.metadata.resourceVersion}").Output()
o.Expect(getRvErr).NotTo(o.HaveOccurred())
finalJSONContent, err = sjson.Set(originImageConfigJSON, `metadata.resourceVersion`, currentResourceVersion)
o.Expect(err).NotTo(o.HaveOccurred())
path := filepath.Join(e2e.TestContext.OutputDir, "restored-imageConfig"+"-"+getRandomString()+".json")
o.Expect(ioutil.WriteFile(path, pretty.Pretty([]byte(finalJSONContent)), 0644)).NotTo(o.HaveOccurred())
e2e.Logf("The restored ImageConfig is %s", path)
_, err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", path).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The ImageConfig restored successfully")
}
func waitCoBecomes(oc *exutil.CLI, coName string, waitTime int, expectedStatus map[string]string) error {
return wait.Poll(5*time.Second, time.Duration(waitTime)*time.Second, func() (bool, error) {
gottenStatus := getCoStatus(oc, coName, expectedStatus)
eq := reflect.DeepEqual(expectedStatus, gottenStatus)
if eq {
eq := reflect.DeepEqual(expectedStatus, map[string]string{"Available": "True", "Progressing": "False", "Degraded": "False"})
if eq {
// For True False False, we want to wait some bit more time and double check, to ensure it is stably healthy
time.Sleep(100 * time.Second)
gottenStatus := getCoStatus(oc, coName, expectedStatus)
eq := reflect.DeepEqual(expectedStatus, gottenStatus)
if eq {
e2e.Logf("Given operator %s becomes available/non-progressing/non-degraded", coName)
return true, nil
}
} else {
e2e.Logf("Given operator %s becomes %s", coName, gottenStatus)
return true, nil
}
}
return false, nil
})
}
func getCoStatus(oc *exutil.CLI, coName string, statusToCompare map[string]string) map[string]string {
newStatusToCompare := make(map[string]string)
for key := range statusToCompare {
args := fmt.Sprintf(`-o=jsonpath={.status.conditions[?(.type == '%s')].status}`, key)
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co", args, coName).Output()
o.Expect(err).NotTo(o.HaveOccurred())
newStatusToCompare[key] = status
}
return newStatusToCompare
}
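// A minimal usage sketch of waitCoBecomes/getCoStatus, assuming the caller wants the
// machine-config cluster operator to settle into the healthy Available/Progressing/Degraded
// state; exampleWaitMachineConfigHealthy is a hypothetical helper shown for illustration only.
func exampleWaitMachineConfigHealthy(oc *exutil.CLI) {
expectedStatus := map[string]string{"Available": "True", "Progressing": "False", "Degraded": "False"}
// wait up to 900 seconds for the operator to report the expected conditions
err := waitCoBecomes(oc, "machine-config", 900, expectedStatus)
exutil.AssertWaitPollNoErr(err, "machine-config operator did not become available/non-progressing/non-degraded")
}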
// this function checks whether the qe-app-registry catalogsource exists; if not it falls back to redhat-operators, and skips the test case when neither is available
func (sub *subscriptionDescription) skipMissingCatalogsources(oc *exutil.CLI) {
output, errQeReg := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-marketplace", "catalogsource", "qe-app-registry").Output()
if errQeReg != nil && strings.Contains(output, "NotFound") {
output, errRed := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-marketplace", "catalogsource", "redhat-operators").Output()
if errRed != nil && strings.Contains(output, "NotFound") {
g.Skip("Skip since catalogsources not available")
} else {
o.Expect(errRed).NotTo(o.HaveOccurred())
}
sub.catalogSourceName = "redhat-operators"
} else {
o.Expect(errQeReg).NotTo(o.HaveOccurred())
}
}
// this function checks that the sigstore signature is verified, based on the crio log
func checkSigstoreVerified(oc *exutil.CLI, namespace string, podName string, image string, docker_ns string) {
waitErr := wait.Poll(5*time.Second, 1*time.Minute, func() (bool, error) {
nodename, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", podName, "-o=jsonpath={.spec.nodeName}", "-n", namespace).Output()
if err != nil {
e2e.Logf("failed to get the pod's node name, error: %s ", err)
return false, nil
}
out, err := exutil.DebugNodeWithChroot(oc, nodename, "/bin/bash", "-c", "journalctl -u crio --since=\"5 minutes ago\"")
if err != nil {
e2e.Logf("failed to get crio log, error: %s ", err)
return false, nil
}
o.Expect(strings.Contains(string(out), "Looking for sigstore attachments in "+image)).Should(o.BeTrue()) //need uncomment
//for docker_ns, for example:
//docker.io ~ docker.io/lyman9966/rhel8
//quay.io/openshift-release-dev/ocp-release ~ quay.io/openshift-release-dev/ocp-release@sha256:c17d4489c1b283ee71c76dda559e66a546e16b208a57eb156ef38fb30098903a
o.Expect(strings.Contains(string(out), "Sigstore attachments: using \\\"docker\\\" namespace "+docker_ns)).Should(o.BeTrue()) //need uncomment
o.Expect(strings.Contains(string(out), "Found a sigstore attachment manifest with 1 layers")).Should(o.BeTrue())
o.Expect(strings.Contains(string(out), "Fetching sigstore attachment")).Should(o.BeTrue())
return true, nil
})
exutil.AssertWaitPollNoErr(waitErr, "check sigstore signature failed!")
}
func ExecCommandOnPod(oc *exutil.CLI, podname string, namespace string, command string) string {
var podOutput string
var execpodErr error
errExec := wait.PollUntilContextTimeout(context.Background(), 15*time.Second, 300*time.Second, false, func(cxt context.Context) (bool, error) {
podOutput, execpodErr = oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", namespace, podname, "--", "/bin/sh", "-c", command).Output()
podOutput = strings.TrimSpace(podOutput)
if execpodErr != nil {
return false, nil
}
return true, nil
})
if errExec != nil {
e2e.Logf(fmt.Sprintf("Run commands %q on pod %q failed of: %v, output is: %s", command, podname, execpodErr, podOutput))
}
exutil.AssertWaitPollNoErr(errExec, fmt.Sprintf("Run commands %q on pod %q failed", command, podname))
return podOutput
}
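// A minimal usage sketch of ExecCommandOnPod; the pod name "hello-pod" and the command are
// placeholders used only for illustration, and exampleExecCommandOnPod is a hypothetical helper.
func exampleExecCommandOnPod(oc *exutil.CLI, namespace string) {
out := ExecCommandOnPod(oc, "hello-pod", namespace, "cat /proc/self/status | grep Cpus_allowed_list")
e2e.Logf("command output on pod: %s", out)
}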
// this function returns the cpu affinity of a pod
func getCpuAffinityFromPod(oc *exutil.CLI, namespace string, podname string) string {
cpuOut, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args(podname, "-n", namespace, "--", "/bin/sh", "-c", "cat /proc/self/status | grep Cpus_allowed_list").Output() // Cpus_allowed_list: 1,3
o.Expect(err).NotTo(o.HaveOccurred())
cpustr := strings.Split(cpuOut, ":")[1]
cpuAffinity := strings.TrimSpace(cpustr)
e2e.Logf(fmt.Sprintf("The cpu affinity is: %v", cpuAffinity))
return cpuAffinity
}
func getCpuAffinityFromCmd(oc *exutil.CLI, pid string, nodeName string) string {
tsksetCmd := fmt.Sprintf(`taskset -pc %v`, pid)
cpuAffinityOut, err := exutil.DebugNodeWithOptionsAndChroot(oc, nodeName, []string{"-q"}, "bash", "-c", tsksetCmd) //pid 2535's current affinity list: 0-3
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("command of `taskset -pc %v` return: %v", pid, cpuAffinityOut)
cpuAffinity := strings.Split(cpuAffinityOut, ":")[1]
cpuAffinity = strings.TrimSpace(cpuAffinity)
return cpuAffinity
}
func getPid(oc *exutil.CLI, podName string, namespace string, nodeName string) string {
containerIDString, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", podName, "-n", namespace, "-o=jsonpath={.status.containerStatuses[0].containerID}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
containerID := strings.Split(containerIDString, "//")[1] // cri-o://98d6bb3c6dbc367571d8cf4e50943184835f298b195361130cd98da4612c3b3b
e2e.Logf("containerID is %v", containerID)
getPidCmd := fmt.Sprintf(`crictl inspect %v | grep -E \"pid\":`, containerID) // "pid": 2535,
pidOut, err := exutil.DebugNodeWithOptionsAndChroot(oc, nodeName, []string{"-q"}, "bash", "-c", getPidCmd)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("pidOut is %v", pidOut)
pidMid := strings.Split(pidOut, ":")[1]
pid := strings.Split(pidMid, ",")[0]
pid = strings.TrimSpace(pid)
return pid
}
// this function checks the cpu affinity of a burstable pod
// coreNum is the number of cpu cores on the node
// guCore is the cpu core set consumed by a guaranteed pod; when assigned "false" it means no cpu core is consumed by a guaranteed pod
func checkCpuAffinityBurst(oc *exutil.CLI, podName string, namespace string, nodeName string, coreNum int, guCore string) {
pid := getPid(oc, podName, namespace, nodeName)
burstCore := getCpuAffinityFromCmd(oc, pid, nodeName)
allCpu := "0-" + strconv.Itoa(coreNum-1)
if guCore == "false" {
o.Expect(burstCore == allCpu).To(o.BeTrue(), fmt.Sprintf("test failed: burstCore != allCpu // guCore is [%v], burstable core is [%v], cpu_num is [%v]", guCore, burstCore, coreNum))
e2e.Logf("verify pass: burstCore == allCpu // guCore is [%v], burstable core is [%v], cpu_num is [%v]", guCore, burstCore, coreNum)
} else {
burstout := getDiffSet(oc, coreNum, guCore)
e2e.Logf("The diff set of [allCpu - guCore] is: %v", burstout)
o.Expect(burstCore == burstout).To(o.BeTrue(), fmt.Sprintf("test failed: burstCore != allCpu - guCore // burstable core is [%v], guCore is [%v], cpu_num is [%v]", burstout, guCore, coreNum))
e2e.Logf("verify pass: burstCore = allCpu - guCore // burstable core is [%v], guCore is [%v], cpu_num is [%v]", burstout, guCore, coreNum)
}
checkCpuInterfaceFile(oc, pid, burstCore, nodeName)
}
func checkCpuInterfaceFile(oc *exutil.CLI, pid string, cpuAffinity string, nodeName string) {
/*
"pid": 78162
cat /proc/78162/cgroup == 0::/kubepods.slice/kubepods-pod7c259501_a249_479d_9280_621dcd56bc41.slice/crio-0f5e79c7110c8b6d767373b6b6defd4f9c284247a3ee205204f9e250be95fec1.scope/container
cat /sys/fs/cgroup/kubepods.slice/kubepods-pod7c259501_a249_479d_9280_621dcd56bc41.slice/crio-0f5e79c7110c8b6d767373b6b6defd4f9c284247a3ee205204f9e250be95fec1.scope/cpuset.cpus.effective == 0-3
cat /sys/fs/cgroup/kubepods.slice/kubepods-pod7c259501_a249_479d_9280_621dcd56bc41.slice/crio-0f5e79c7110c8b6d767373b6b6defd4f9c284247a3ee205204f9e250be95fec1.scope/cpuset.cpus == 0-3
*/
getCgroupCmd := fmt.Sprintf(`cat /proc/%v/cgroup`, pid)
cgroupOut, err := exutil.DebugNodeWithOptionsAndChroot(oc, nodeName, []string{"-q"}, "bash", "-c", getCgroupCmd)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("the command of `cat /proc/%v/cgroup` return: %v", pid, cgroupOut)
cgroupStr := strings.Split(cgroupOut, "::")[1]
e2e.Logf("cgroupStr is: %v", cgroupStr)
cgroup := strings.TrimSpace(cgroupStr)
cgroup = strings.Trim(cgroup, "container") //4.18
//cgroup = cgroup + "/" //4.17
e2e.Logf("cgroup is: %v", cgroup)
cpuEffectiveCmd := fmt.Sprintf(`cat /sys/fs/cgroup%vcpuset.cpus.effective`, cgroup)
cpuEffective, err := exutil.DebugNodeWithOptionsAndChroot(oc, nodeName, []string{"-q"}, "bash", "-c", cpuEffectiveCmd)
o.Expect(err).NotTo(o.HaveOccurred())
cpuEffective = strings.TrimSpace(cpuEffective)
e2e.Logf("the command of `cat /sys/fs/cgroup%vcpuset.cpus.effective` return: %v", cgroup, cpuEffective)
/*
// here exists a bug, comment it temporarily
cpuCpusCmd := fmt.Sprintf(`cat /sys/fs/cgroup%vcpuset.cpus`, cgroup)
cpuCpus, err := exutil.DebugNodeWithOptionsAndChroot(oc, nodeName, []string{"-q"}, "bash", "-c", cpuCpusCmd)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("the command of `cat /sys/fs/cgroup%vcpuset.cpus` return: %v", cgroup, cpuCpus)
*/
//e2e.Logf("cpuAffinity is: %v, cpuEffective is: %v and cpuCpus is: %v", cpuAffinity, cpuEffective, cpuCpus)
e2e.Logf("cpuAffinity is: %v, cpuEffective is: %v", cpuAffinity, cpuEffective)
// compare cpuAffinity == cpuEffective == cpuCpus
o.Expect(cpuAffinity == cpuEffective).To(o.BeTrue(), fmt.Sprintf("test failed! cpuAffinity != cpuEffective, cpuAffinity:%v and cpuEffective:%v", cpuAffinity, cpuEffective))
//o.Expect(cpuCpus == cpuEffective).To(o.BeTrue(), fmt.Sprintf("test failed, cpuCpus != cpuEffective : %v", cpuCpus))
//e2e.Logf("verify pass: cpuAffinity == cpuEffective == cpuCpus // cpuAffinity is %v, cpuEffective is %v, cpuCpus is %v", cpuAffinity, cpuEffective, cpuCpus)
e2e.Logf("verify pass: cpuAffinity == cpuEffective // cpuAffinity is %v, cpuEffective is %v", cpuAffinity, cpuEffective)
}
// get a difference set of cpu core, e.g. cpu_num is 4 , guCore is "1", then return "0,2-3"
func getDiffSet(oc *exutil.CLI, cpu_num int, guCore string) string {
fullSet := make([]int, cpu_num)
for i := 0; i < cpu_num; i++ {
fullSet[i] = i
}
// Parse the guCore "1" into individual numbers
excludeParts := strings.Split(guCore, ",")
excludeMap := make(map[int]bool)
for _, numStr := range excludeParts {
num, _ := strconv.Atoi(numStr)
excludeMap[num] = true
}
// Create a slice for the remaining numbers
var remaining []int
for _, num := range fullSet {
if !excludeMap[num] {
remaining = append(remaining, num)
}
}
return formatStr(remaining)
}
// format the numbers into a string with ranges, e.g. numbers[0,2,3], then return "0,2-3"
func formatStr(numbers []int) string {
var formatted []string
i := 0
for i < len(numbers) {
start := numbers[i]
// Find the end of the current contiguous range
for i+1 < len(numbers) && numbers[i+1] == numbers[i]+1 {
i++
}
end := numbers[i]
// If the range has only one element, just add it as a single number
if start == end {
formatted = append(formatted, strconv.Itoa(start))
} else {
// Otherwise, add it as a range
formatted = append(formatted, fmt.Sprintf("%d-%d", start, end))
}
i++
}
return strings.Join(formatted, ",")
}
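// A small illustrative sketch of how getDiffSet and formatStr compose, using the example from
// the comments above: with 4 cpu cores and guaranteed core "1" the burstable set is "0,2-3".
// exampleBurstableCpuSet is a hypothetical helper and is not used by the test cases.
func exampleBurstableCpuSet(oc *exutil.CLI) {
burstable := getDiffSet(oc, 4, "1") // full set {0,1,2,3} minus guaranteed core {1}
e2e.Logf("burstable cpu set: %s", burstable) // expected "0,2-3"
e2e.Logf("formatted: %s", formatStr([]int{0, 2, 3})) // expected "0,2-3"
}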
// clusterNodesHealthcheck checks for abnormal nodes
func clusterNodesHealthcheck(oc *exutil.CLI, waitTime int) error {
errNode := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, time.Duration(waitTime)*time.Second, false, func(cxt context.Context) (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node").Output()
if err == nil {
if strings.Contains(output, "NotReady") || strings.Contains(output, "SchedulingDisabled") {
return false, nil
}
} else {
return false, nil
}
e2e.Logf("Nodes are normal...")
err = oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
return true, nil
})
if errNode != nil {
err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
return errNode
}
func defaultRuntimeCheck(oc *exutil.CLI, expectedRuntime string) {
var defaultruntime string
var err error
waitErr := wait.Poll(10*time.Second, 1*time.Minute, func() (bool, error) {
nodeName, nodeErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(nodeErr).NotTo(o.HaveOccurred())
e2e.Logf("\nNode Names are %v", nodeName)
nodes := strings.Fields(nodeName)
for _, node := range nodes {
nodeStatus, statusErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", node, "-o=jsonpath={.status.conditions[?(@.type=='Ready')].status}").Output()
o.Expect(statusErr).NotTo(o.HaveOccurred())
e2e.Logf("\nNode %s Status is %s\n", node, nodeStatus)
if nodeStatus == "True" {
defaultruntime, err = exutil.DebugNodeWithChroot(oc, node, "cat", "/etc/crio/crio.conf.d/00-default")
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(string(defaultruntime), expectedRuntime) {
e2e.Logf(" Success !! Default Runtime is %s. \n", expectedRuntime)
return true, nil
} else {
e2e.Logf(" FAILED!! Default Runtime is not %s \n", expectedRuntime)
return false, nil
}
} else {
e2e.Logf("\n NODES ARE NOT READY\n ")
}
}
return false, nil
})
exutil.AssertWaitPollNoErr(waitErr, "Default Runtime is not Expected")
}
func UpdatedRuntimeCheck(oc *exutil.CLI, runtime string) {
var defaultRuntime string
var err error
waitErr := wait.Poll(10*time.Second, 1*time.Minute, func() (bool, error) {
nodeName, nodeErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(nodeErr).NotTo(o.HaveOccurred())
e2e.Logf("\nNode Names are %v", nodeName)
nodes := strings.Fields(nodeName)
for _, node := range nodes {
nodeStatus, statusErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", node, "-o=jsonpath={.status.conditions[?(@.type=='Ready')].status}").Output()
o.Expect(statusErr).NotTo(o.HaveOccurred())
e2e.Logf("\nNode %s Status is %s\n", node, nodeStatus)
if nodeStatus == "True" {
defaultRuntime, err = exutil.DebugNodeWithChroot(oc, node, "cat", "/etc/crio/crio.conf.d/01-ctrcfg-defaultRuntime")
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(string(defaultRuntime), runtime) {
e2e.Logf(" Success !! Default Runtime is %s. \n", runtime)
return true, nil
} else {
e2e.Logf(" FAILED!! Default Runtime is not %s \n", runtime)
return false, nil
}
} else {
e2e.Logf("\n NODES ARE NOT READY\n ")
}
}
return false, nil
})
exutil.AssertWaitPollNoErr(waitErr, "Default Runtime is not Expected")
}
|
package node
| ||||
function
|
openshift/openshift-tests-private
|
90fabdbd-17f4-454b-b022-788390a8c17c
|
create
|
['cpuPerfProfile']
|
github.com/openshift/openshift-tests-private/test/extended/node/node_utils.go
|
func (cpuPerfProfile *cpuPerfProfile) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", cpuPerfProfile.template, "-p", "NAME="+cpuPerfProfile.name, "ISOLATED="+cpuPerfProfile.isolated)
o.Expect(err).NotTo(o.HaveOccurred())
}
|
node
| ||||
function
|
openshift/openshift-tests-private
|
8402b1f3-d98b-489a-9b1e-a6ae85d4e091
|
delete
|
['cpuPerfProfile']
|
github.com/openshift/openshift-tests-private/test/extended/node/node_utils.go
|
func (cpuPerfProfile *cpuPerfProfile) delete(oc *exutil.CLI) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("PerformanceProfile", cpuPerfProfile.name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
|
node
| ||||
function
|
openshift/openshift-tests-private
|
794ec407-8796-4846-8622-dd592c3f45dc
|
create
|
['podCpuLoadBalance']
|
github.com/openshift/openshift-tests-private/test/extended/node/node_utils.go
|
func (podCpuLoadBalance *podCpuLoadBalance) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", podCpuLoadBalance.template, "-p", "NAME="+podCpuLoadBalance.name, "NAMESPACE="+podCpuLoadBalance.namespace, "RUNTIMECLASS="+podCpuLoadBalance.runtimeclass)
o.Expect(err).NotTo(o.HaveOccurred())
}
|
node
| ||||
function
|
openshift/openshift-tests-private
|
3f6b4fe9-05db-483f-a6d5-dd53225a3799
|
delete
|
['podCpuLoadBalance']
|
github.com/openshift/openshift-tests-private/test/extended/node/node_utils.go
|
func (podCpuLoadBalance *podCpuLoadBalance) delete(oc *exutil.CLI) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", podCpuLoadBalance.namespace, "pod", podCpuLoadBalance.name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
|
node
| ||||
function
|
openshift/openshift-tests-private
|
44b1a8bd-a260-47e9-af99-0442126a9bca
|
create
|
['podWASM']
|
github.com/openshift/openshift-tests-private/test/extended/node/node_utils.go
|
func (podWASM *podWASM) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", podWASM.template, "-p", "NAME="+podWASM.name, "NAMESPACE="+podWASM.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}
|
node
| ||||
function
|
openshift/openshift-tests-private
|
98235922-9975-4896-a4ff-bedce429aa5c
|
delete
|
['podWASM']
|
github.com/openshift/openshift-tests-private/test/extended/node/node_utils.go
|
func (podWASM *podWASM) delete(oc *exutil.CLI) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", podWASM.namespace, "pod", podWASM.name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
|
node
| ||||
function
|
openshift/openshift-tests-private
|
ab2a2398-d841-4759-8532-db0b483baafa
|
create
|
['podDevFuseDescription']
|
github.com/openshift/openshift-tests-private/test/extended/node/node_utils.go
|
func (podDevFuse *podDevFuseDescription) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", podDevFuse.template, "-p", "NAME="+podDevFuse.name, "NAMESPACE="+podDevFuse.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}
|
node
| ||||
function
|
openshift/openshift-tests-private
|
02d0ea7b-9289-49ad-9576-6d579133a3d0
|
delete
|
['podDevFuseDescription']
|
github.com/openshift/openshift-tests-private/test/extended/node/node_utils.go
|
func (podDevFuse *podDevFuseDescription) delete(oc *exutil.CLI) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", podDevFuse.namespace, "pod", podDevFuse.name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
|
node
| ||||
function
|
openshift/openshift-tests-private
|
c4896a99-96db-4a78-af0d-dd552be3cea2
|
checkDevFuseMount
|
['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/node/node_utils.go
|
func checkDevFuseMount(oc *exutil.CLI, namespace string, podname string) error {
return wait.Poll(1*time.Second, 3*time.Second, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("rsh").Args("-n", namespace, podname, "/bin/bash", "-c", "ls -al /dev | grep fuse").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(status, "fuse") {
e2e.Logf("\ndev fuse is mounted inside the pod")
return true, nil
}
return false, nil
})
}
|
node
| ||||
function
|
openshift/openshift-tests-private
|
b979f56c-98ea-4ced-a51c-7035464b04a6
|
create
|
['podLogLinkDescription']
|
github.com/openshift/openshift-tests-private/test/extended/node/node_utils.go
|
func (podLogLink *podLogLinkDescription) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", podLogLink.template, "-p", "NAME="+podLogLink.name, "NAMESPACE="+podLogLink.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}
|
node
| ||||
function
|
openshift/openshift-tests-private
|
9f20ed32-1c05-4ddc-a3e5-da064391edb4
|
delete
|
['podLogLinkDescription']
|
github.com/openshift/openshift-tests-private/test/extended/node/node_utils.go
|
func (podLogLink *podLogLinkDescription) delete(oc *exutil.CLI) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", podLogLink.namespace, "pod", podLogLink.name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
|
node
| ||||
function
|
openshift/openshift-tests-private
|
5c1c316c-dbcd-4d1c-a195-d2ddc78ec35a
|
create
|
['"strconv"']
|
['liveProbeTermPeriod']
|
github.com/openshift/openshift-tests-private/test/extended/node/node_utils.go
|
// create renders and applies the liveness-probe test pod, passing the pod-level
// terminationGracePeriodSeconds and the probe-level terminationGracePeriodSeconds
// to the template as TERMINATIONGRACE and PROBETERMINATIONGRACE.
func (liveProbe *liveProbeTermPeriod) create(oc *exutil.CLI) {
	err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", liveProbe.template, "-p", "NAME="+liveProbe.name, "NAMESPACE="+liveProbe.namespace, "TERMINATIONGRACE="+strconv.Itoa(liveProbe.terminationgrace), "PROBETERMINATIONGRACE="+strconv.Itoa(liveProbe.probeterminationgrace))
	o.Expect(err).NotTo(o.HaveOccurred())
}
|
node
| |||
function
|
openshift/openshift-tests-private
|
87c62e2b-8716-4da7-9bd2-d26ec6a5680e
|
delete
|
['liveProbeTermPeriod']
|
github.com/openshift/openshift-tests-private/test/extended/node/node_utils.go
|
func (liveProbe *liveProbeTermPeriod) delete(oc *exutil.CLI) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", liveProbe.namespace, "pod", liveProbe.name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
|
node
| ||||
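A minimal sketch of how a probe termination-grace pod could be driven from a test body; the same pattern applies to the startProbeTermPeriod and readProbeTermPeriod variants below. The grace-period values, pod name, template path, and buildPruningBaseDir are assumptions chosen only to illustrate the parameters the create method expects.
// Hypothetical usage fragment -- all values below are assumptions for illustration.
liveProbe := liveProbeTermPeriod{
	name:                  "liveness-probe-pod",                                          // assumed pod name
	namespace:             oc.Namespace(),                                                // test's temporary namespace
	terminationgrace:      60,                                                            // assumed pod-level terminationGracePeriodSeconds
	probeterminationgrace: 10,                                                            // assumed probe-level terminationGracePeriodSeconds
	template:              filepath.Join(buildPruningBaseDir, "pod-liveness-probe.yaml"), // assumed template path
}
defer liveProbe.delete(oc)
liveProbe.create(oc)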
function
|
openshift/openshift-tests-private
|
3c524d13-b443-420b-bb4b-aaf886a89265
|
create
|
['"strconv"']
|
['startProbeTermPeriod']
|
github.com/openshift/openshift-tests-private/test/extended/node/node_utils.go
|
func (startProbe *startProbeTermPeriod) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", startProbe.template, "-p", "NAME="+startProbe.name, "NAMESPACE="+startProbe.namespace, "TERMINATIONGRACE="+strconv.Itoa(startProbe.terminationgrace), "PROBETERMINATIONGRACE="+strconv.Itoa(startProbe.probeterminationgrace))
o.Expect(err).NotTo(o.HaveOccurred())
}
|
node
| |||
function
|
openshift/openshift-tests-private
|
a01ca83f-8e8a-481f-bc96-ec33561becf8
|
delete
|
['startProbeTermPeriod']
|
github.com/openshift/openshift-tests-private/test/extended/node/node_utils.go
|
func (startProbe *startProbeTermPeriod) delete(oc *exutil.CLI) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", startProbe.namespace, "pod", startProbe.name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
|
node
| ||||
function
|
openshift/openshift-tests-private
|
9353fe34-28c9-4f3d-9592-191e5ecbde00
|
create
|
['"strconv"']
|
['readProbeTermPeriod']
|
github.com/openshift/openshift-tests-private/test/extended/node/node_utils.go
|
func (readProbe *readProbeTermPeriod) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", readProbe.template, "-p", "NAME="+readProbe.name, "NAMESPACE="+readProbe.namespace, "TERMINATIONGRACE="+strconv.Itoa(readProbe.terminationgrace), "PROBETERMINATIONGRACE="+strconv.Itoa(readProbe.probeterminationgrace))
o.Expect(err).NotTo(o.HaveOccurred())
}
|
node
| |||
function
|
openshift/openshift-tests-private
|
5de4302f-1212-47de-8c97-98a3531b48b8
|
delete
|
['readProbeTermPeriod']
|
github.com/openshift/openshift-tests-private/test/extended/node/node_utils.go
|
func (readProbe *readProbeTermPeriod) delete(oc *exutil.CLI) {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", readProbe.namespace, "pod", readProbe.name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
|
node
|