element_type | project_name | uuid | name | imports | structs | interfaces | file_location | code | global_vars | package | tags
---|---|---|---|---|---|---|---|---|---|---|---|
function | openshift/openshift-tests-private | 1dad3603-89ad-42dd-885b-1e215edc86b7 | create | ['idmsSource'] | github.com/openshift/openshift-tests-private/test/extended/image_registry/util.go | func (idmssrc *idmsSource) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", idmssrc.template, "-p", "NAME="+idmssrc.name, "MIRRORS="+idmssrc.mirrors, "SOURCE="+idmssrc.source)
o.Expect(err).NotTo(o.HaveOccurred())
} | imageregistry | ||||
function | openshift/openshift-tests-private | 0b78ea91-4c5f-4e86-82af-94f84c6e3b2c | delete | ['idmsSource'] | github.com/openshift/openshift-tests-private/test/extended/image_registry/util.go | func (idmssrc *idmsSource) delete(oc *exutil.CLI) {
e2e.Logf("deleting idms: %s", idmssrc.name)
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("idms", idmssrc.name, "--ignore-not-found=true").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
} | imageregistry | ||||
function | openshift/openshift-tests-private | 5b01eafb-77c5-41ae-b172-c210d8ad18e5 | create | ['itmsSource'] | github.com/openshift/openshift-tests-private/test/extended/image_registry/util.go | func (itmssrc *itmsSource) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", itmssrc.template, "-p", "NAME="+itmssrc.name, "MIRRORS="+itmssrc.mirrors, "SOURCE="+itmssrc.source)
o.Expect(err).NotTo(o.HaveOccurred())
} | imageregistry | ||||
function | openshift/openshift-tests-private | 87f2933a-75d0-47f0-85c8-818bdea62b64 | delete | ['itmsSource'] | github.com/openshift/openshift-tests-private/test/extended/image_registry/util.go | func (itmssrc *itmsSource) delete(oc *exutil.CLI) {
e2e.Logf("deleting itms: %s", itmssrc.name)
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("itms", itmssrc.name, "--ignore-not-found=true").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
} | imageregistry | ||||
function | openshift/openshift-tests-private | 7b4ec051-6195-4d91-a000-a2331541da7f | create | ['isStruct'] | github.com/openshift/openshift-tests-private/test/extended/image_registry/util.go | func (issrc *isStruct) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", issrc.template, "-p", "NAME="+issrc.name, "REPO="+issrc.repo, "NAMESPACE="+issrc.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
} | imageregistry | ||||
function | openshift/openshift-tests-private | 9bcc5d82-734d-441a-9ccd-ef87d6158a6b | GetMirrorRegistry | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/image_registry/util.go | func GetMirrorRegistry(oc *exutil.CLI) (registry string) {
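// Read the first mirror of the cluster's first IDMS and return only its registry host (the part before the first "/").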
registry, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("idms", "-o", "jsonpath={.items[0].spec.imageDigestMirrors[0].mirrors[0]}").Output()
if err != nil {
e2e.Failf("failed to acquire mirror registry from IDMS: %v", err)
} else {
registry, _, _ = strings.Cut(registry, "/")
}
return registry
} | imageregistry | ||||
function | openshift/openshift-tests-private | 7d75bc9e-90a0-4bb4-a881-12c2e4eb5bce | checkImagePruners | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/image_registry/util.go | func checkImagePruners(oc *exutil.CLI) bool {
impr, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("imagepruners").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(impr, "No resources found") {
e2e.Logf("there is no imagepruners in this cluster")
return false
}
return true
} | imageregistry | ||||
function | openshift/openshift-tests-private | 8e2ef2e5-a176-4680-b1d4-aa1060f1a590 | get_osp_authurl | ['"encoding/base64"', '"regexp"', '"strings"', '"github.com/aws/aws-sdk-go-v2/credentials"'] | github.com/openshift/openshift-tests-private/test/extended/image_registry/util.go | func get_osp_authurl(oc *exutil.CLI) string {
g.By("get authurl")
var authURL string
credentials, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/openstack-credentials", "-n", "kube-system", "-o", `jsonpath={.data.clouds\.yaml}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
credential, err := base64.StdEncoding.DecodeString(credentials)
o.Expect(err).NotTo(o.HaveOccurred())
r, _ := regexp.Compile("auth_url:.*")
match := r.FindAllString(string(credential), -1)
if strings.Contains(match[0], "auth_url") {
authURL = strings.Split(match[0], " ")[1]
return authURL
}
return ""
} | imageregistry | ||||
function | openshift/openshift-tests-private | 665f89c3-7419-4b02-abbb-770fca617342 | getgcloudClient | github.com/openshift/openshift-tests-private/test/extended/image_registry/util.go | func getgcloudClient(oc *exutil.CLI) *exutil.Gcloud {
if exutil.CheckPlatform(oc) != "gcp" {
g.Skip("it is not gcp platform!")
}
projectID, err := exutil.GetGcpProjectID(oc)
o.Expect(err).NotTo(o.HaveOccurred())
gcloud := exutil.Gcloud{ProjectID: projectID}
return gcloud.Login()
} | imageregistry | |||||
function | openshift/openshift-tests-private | 34829b91-3449-4e23-ad4e-a8575122f8b6 | filterTimestampFromLogs | ['"regexp"'] | github.com/openshift/openshift-tests-private/test/extended/image_registry/util.go | func filterTimestampFromLogs(logs string, numberOfTimestamp int) []string {
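// Extract up to numberOfTimestamp HH:MM:SS.ffffff-style timestamps from the log output.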
return regexp.MustCompile("(?m)[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}.[0-9]{1,6}").FindAllString(logs, numberOfTimestamp)
} | imageregistry | ||||
function | openshift/openshift-tests-private | 37292c54-6298-46c9-90eb-60434f26f0be | getTimeDifferenceInMinute | ['"strconv"', '"strings"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/image_registry/util.go | func getTimeDifferenceInMinute(oldTimestamp, newTimestamp string) float64 {
oldTimeValues := strings.Split(oldTimestamp, ":")
oldTimeHour, _ := strconv.Atoi(oldTimeValues[0])
oldTimeMinute, _ := strconv.Atoi(oldTimeValues[1])
oldTimeSecond, _ := strconv.Atoi(strings.Split(oldTimeValues[2], ".")[0])
oldTimeNanoSecond, _ := strconv.Atoi(strings.Split(oldTimeValues[2], ".")[1])
newTimeValues := strings.Split(newTimestamp, ":")
newTimeHour, _ := strconv.Atoi(newTimeValues[0])
newTimeMinute, _ := strconv.Atoi(newTimeValues[1])
newTimeSecond, _ := strconv.Atoi(strings.Split(newTimeValues[2], ".")[0])
newTimeNanoSecond, _ := strconv.Atoi(strings.Split(newTimeValues[2], ".")[1])
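// Both timestamps are assumed to fall on the current day, so an interval that crosses midnight is not handled.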
y, m, d := time.Now().Date()
oldTime := time.Date(y, m, d, oldTimeHour, oldTimeMinute, oldTimeSecond, oldTimeNanoSecond, time.UTC)
newTime := time.Date(y, m, d, newTimeHour, newTimeMinute, newTimeSecond, newTimeNanoSecond, time.UTC)
return newTime.Sub(oldTime).Minutes()
} | imageregistry | ||||
function | openshift/openshift-tests-private | aa3be446-2903-4067-91a7-621e986baff5 | validateResourceEnv | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/image_registry/util.go | func validateResourceEnv(oc *exutil.CLI, namespace, resource, value string) {
result, err := oc.AsAdmin().WithoutNamespace().Run("set").Args("env", "-n", namespace, resource, "--list").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(result, value)).To(o.BeTrue())
} | imageregistry | ||||
function | openshift/openshift-tests-private | 00d74fbb-85b9-4ed5-8699-1e769ea47ec5 | checkDiscPolicy | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/image_registry/util.go | func checkDiscPolicy(oc *exutil.CLI) (string, bool) {
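// Return the first image mirror policy type (ImageContentSourcePolicy, idms or itms) that has resources in the cluster, and whether any was found.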
sites := [3]string{"ImageContentSourcePolicy", "idms", "itms"}
for _, policy := range sites {
result, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(policy).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if !strings.Contains(result, "No resources found") {
return policy, true
}
}
return "", false
} | imageregistry | ||||
function | openshift/openshift-tests-private | 12a14c20-d431-4059-8887-0d8c09a588ae | checkMirrorRegistry | github.com/openshift/openshift-tests-private/test/extended/image_registry/util.go | func checkMirrorRegistry(oc *exutil.CLI, repo string) string {
// Look up the mirrors configured for repo in whichever mirror policy type the cluster uses.
policy, found := checkDiscPolicy(oc)
if !found {
return ""
}
switch policy {
case "ImageContentSourcePolicy":
mirrorReg, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ImageContentSourcePolicy/image-policy-aosqe", "-o=jsonpath={.spec.repositoryDigestMirrors[?(@.source==\""+repo+"\")].mirrors[]}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
return mirrorReg
case "idms":
mirrorReg, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("idms/image-policy-aosqe", "-o=jsonpath={.spec.imageDigestMirrors[?(@.source==\""+repo+"\")].mirrors[]}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
return mirrorReg
case "itms":
mirrorReg, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("itms/image-policy-aosqe", "-o=jsonpath={.spec.imageTagMirrors[?(@.source==\""+repo+"\")].mirrors[]}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
return mirrorReg
}
return ""
} | imageregistry | |||||
function | openshift/openshift-tests-private | f8d44921-543f-4bbd-99d5-bbe8ae1c5c44 | SkipDnsFailure | github.com/openshift/openshift-tests-private/test/extended/image_registry/util.go | func SkipDnsFailure(oc *exutil.CLI) {
expectedStatus := map[string]string{"Available": "True", "Progressing": "False", "Degraded": "False"}
err := waitCoBecomes(oc, "ingress", 240, expectedStatus)
if err != nil {
g.Skip("Ingress is not ready, skip the case test!")
}
err = waitCoBecomes(oc, "dns", 240, expectedStatus)
if err != nil {
g.Skip("Dns is not ready, skip the case test!")
}
} | imageregistry | |||||
function | openshift/openshift-tests-private | 8f40ccb0-3ced-4e0d-8781-a5a1614d28c5 | isIPIAzure | ['"strings"', '"github.com/aws/aws-sdk-go-v2/config"'] | github.com/openshift/openshift-tests-private/test/extended/image_registry/util.go | func isIPIAzure(oc *exutil.CLI) bool {
result, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm", "openshift-install", "-n", "openshift-config").Output()
o.Expect(err).NotTo(o.HaveOccurred())
return !strings.Contains(result, "NotFound")
} | imageregistry | ||||
function | openshift/openshift-tests-private | 511563d0-a42c-45af-972b-67ef24fa376d | hasDuplicate | github.com/openshift/openshift-tests-private/test/extended/image_registry/util.go | func hasDuplicate(slice []string, value string) bool {
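// hasDuplicate reports whether value appears more than once in slice.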
countMap := make(map[string]int)
for _, v := range slice {
if v == value {
countMap[v]++
if countMap[v] > 1 {
return true
}
}
}
return false
} | imageregistry | |||||
function | openshift/openshift-tests-private | affa7d76-3934-43b8-82b3-a9966181ec13 | configureRegistryStorageToPvc | ['"fmt"'] | github.com/openshift/openshift-tests-private/test/extended/image_registry/util.go | func configureRegistryStorageToPvc(oc *exutil.CLI, pvcName string) {
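// Detach the current registry storage with the operator Unmanaged, then re-enable it Managed with one replica, the Recreate rollout strategy, and the given PVC as the backing store, and wait for a single registry pod to run.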
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("configs.imageregistry/cluster", "-p", `{"spec":{"storage":null, "managementState":"Unmanaged"}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
patchInfo := fmt.Sprintf("{\"spec\":{\"managementState\":\"Managed\",\"replicas\":1,\"rolloutStrategy\":\"Recreate\",\"storage\":{\"managementState\":\"Managed\",\"pvc\":{\"claim\":\"%s\"}}}}", pvcName)
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("configs.imageregistry/cluster", "-p", patchInfo, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
checkPodsRunningWithLabel(oc, "openshift-image-registry", "docker-registry=default", 1)
} | imageregistry | ||||
function | openshift/openshift-tests-private | 7c2c0c81-15fe-45f3-ae85-c7a8980bbb12 | create | ['persistentVolumeClaim'] | github.com/openshift/openshift-tests-private/test/extended/image_registry/util.go | func (pvc *persistentVolumeClaim) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", pvc.template, "-p", "NAME="+pvc.name, "NAMESPACE="+pvc.namespace, "MEMORYSIZE="+pvc.memorysize, "STORAGECLASSNAME="+pvc.storageclassname, "ACCESSMODE="+pvc.accessmode)
o.Expect(err).NotTo(o.HaveOccurred())
} | imageregistry | ||||
function | openshift/openshift-tests-private | 9fc8ae4d-9fdd-46ab-862e-e72ee2846987 | waitForPvcStatus | ['"regexp"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/image_registry/util.go | func waitForPvcStatus(oc *exutil.CLI, namespace string, pvcname string) {
err := wait.Poll(10*time.Second, 180*time.Second, func() (bool, error) {
pvStatus, err := oc.AsAdmin().Run("get").Args("-n", namespace, "pvc", pvcname, "-o=jsonpath='{.status.phase}'").Output()
if err != nil {
return false, err
}
if match, _ := regexp.MatchString("Bound", pvStatus); match {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "The PVC is not Bound as expected")
} | imageregistry | ||||
function | openshift/openshift-tests-private | 3119c9f4-58bb-4869-8275-e7fb2c682000 | checkMetric | ['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/image_registry/util.go | func checkMetric(oc *exutil.CLI, url, token, metricString string, timeout time.Duration) {
var metrics string
var err error
getCmd := "curl -G -k -s -H \"Authorization:Bearer " + token + "\" " + url
err = wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, timeout*time.Second, false, func(context.Context) (bool, error) {
metrics, err = exutil.RemoteShPod(oc, "openshift-monitoring", "prometheus-k8s-0", "sh", "-c", getCmd)
if err != nil || !strings.Contains(metrics, metricString) {
return false, nil
}
return true, err
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The metrics %s failed to contain %s", metrics, metricString))
} | imageregistry | ||||
function | openshift/openshift-tests-private | 1c20248e-206e-4a65-89b1-c81403f1ade5 | getAzureImageRegistryStorage | ['"encoding/json"', '"github.com/aws/aws-sdk-go-v2/config"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"'] | ['azureStorageSetting'] | github.com/openshift/openshift-tests-private/test/extended/image_registry/util.go | func getAzureImageRegistryStorage(oc *exutil.CLI) azureStorageSetting {
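// Read the Azure storage settings from the image registry config status and unmarshal them into azureStorageSetting.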
var setting azureStorageSetting
output, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("config.image/cluster", "-o=jsonpath={.status.storage.azure}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
json.Unmarshal([]byte(output), &setting)
return setting
} | imageregistry | |||
function | openshift/openshift-tests-private | 11ce0cd4-d4d7-4271-a045-68e4d6262921 | getAzureRegion | ['"encoding/base64"', '"github.com/aws/aws-sdk-go-v2/credentials"'] | github.com/openshift/openshift-tests-private/test/extended/image_registry/util.go | func getAzureRegion(oc *exutil.CLI) string {
regionSec, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/installer-cloud-credentials", "-n", "openshift-image-registry", "-o=jsonpath={.data.azure_region}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
region, err := base64.StdEncoding.DecodeString(regionSec)
o.Expect(err).NotTo(o.HaveOccurred())
return string(region)
} | imageregistry | ||||
function | openshift/openshift-tests-private | 3798b878-4ff4-4e99-b676-3c1ddeb1e226 | getResourceGroupOnAzure | github.com/openshift/openshift-tests-private/test/extended/image_registry/util.go | func getResourceGroupOnAzure(oc *exutil.CLI) string {
resourceGroup, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructures", "cluster", "-o=jsonpath={.status.platformStatus.azure.resourceGroupName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
return resourceGroup
} | imageregistry | |||||
function | openshift/openshift-tests-private | d0f6cadb-b548-415d-8e49-913485177744 | IsFeaturegateEnabled | github.com/openshift/openshift-tests-private/test/extended/image_registry/util.go | func IsFeaturegateEnabled(oc *exutil.CLI, featuregate string) (bool, error) {
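// IsFeaturegateEnabled reports whether the named feature gate is listed as enabled on the cluster FeatureGate resource.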
enabledFeatureGates, err := getEnabledFeatureGates(oc)
if err != nil {
return false, err
}
for _, f := range enabledFeatureGates {
if f == featuregate {
return true, nil
}
}
return false, nil
} | imageregistry | |||||
function | openshift/openshift-tests-private | 8837ca86-bee9-4179-9b1f-e2d047d0f82c | getEnabledFeatureGates | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/image_registry/util.go | func getEnabledFeatureGates(oc *exutil.CLI) ([]string, error) {
enabledFeatureGates, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("featuregate", "cluster", "-o=jsonpath={.status.featureGates[0].enabled[*].name}").Output()
if err != nil {
return nil, err
}
return strings.Split(enabledFeatureGates, " "), nil
} | imageregistry | ||||
function | openshift/openshift-tests-private | 102cd373-2668-495c-92df-f416bb00bc6b | IsMultiArch | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/image_registry/util.go | func IsMultiArch(oc *exutil.CLI) bool {
architecture, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion/version", "-o=jsonpath={..status.desired.architecture}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
return strings.ToLower(architecture) == "multi"
} | imageregistry | ||||
test | openshift/openshift-tests-private | 548eeecc-2bd7-4435-a05a-d855b2b3f0e9 | bmo_validations | import (
"fmt"
"os"
"path/filepath"
"strconv"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/bmo_validations.go | package baremetal
import (
"fmt"
"os"
"path/filepath"
"strconv"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
// var _ = g.Describe("[sig-baremetal] INSTALLER UPI for INSTALLER_GENERAL job on BareMetal", func() {
// defer g.GinkgoRecover()
// var (
// oc = exutil.NewCLI("cluster-baremetal-operator", exutil.KubeConfigPath())
// )
// g.BeforeEach(func() {
// })
// g.AfterEach(func() {
// })
// // author: [email protected]
// g.It("Author:sgoveas--Medium-12345-example case", func() {
// })
// })
// var _ = g.Describe("[sig-baremetal] INSTALLER UPI for INSTALLER_DEDICATED job on BareMetal", func() {
// defer g.GinkgoRecover()
// var (
// oc = exutil.NewCLI("cluster-baremetal-operator", exutil.KubeConfigPath())
// )
// g.BeforeEach(func() {
// })
// g.AfterEach(func() {
// })
// // author: [email protected]
// g.It("Author:sgoveas--Medium-12345-example case", func() {
// })
// })
// var _ = g.Describe("[sig-baremetal] INSTALLER IPI for INSTALLER_GENERAL job on BareMetal", func() {
// defer g.GinkgoRecover()
// var (
// oc = exutil.NewCLI("cluster-baremetal-operator", exutil.KubeConfigPath())
// )
// g.BeforeEach(func() {
// })
// g.AfterEach(func() {
// })
// // author: [email protected]
// g.It("Author:sgoveas--Medium-12345-example case", func() {
// })
// })
var _ = g.Describe("[sig-baremetal] INSTALLER IPI for INSTALLER_DEDICATED job on BareMetal", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("cluster-baremetal-operator", exutil.KubeConfigPath())
iaasPlatform string
dirname string
)
g.BeforeEach(func() {
exutil.SkipForSNOCluster(oc)
iaasPlatform = exutil.CheckPlatform(oc)
if !(iaasPlatform == "baremetal") {
e2e.Logf("Cluster is: %s", iaasPlatform)
g.Skip("For Non-baremetal cluster , this is not supported!")
}
})
// author: [email protected]
g.It("Author:jhajyahy-Medium-66490-Allow modification of BMC address after installation [Disruptive]", func() {
g.By("Running oc patch bmh -n openshift-machine-api master-00")
bmhName, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("baremetalhosts", "-n", machineAPINamespace, "-o=jsonpath={.items[4].metadata.name}").Output()
bmcAddressOrig, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("baremetalhosts", "-n", machineAPINamespace, "-o=jsonpath={.items[4].spec.bmc.address}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
patchConfig := `[{"op": "replace", "path": "/spec/bmc/address", "value":"redfish-virtualmedia://10.1.234.25/redfish/v1/Systems/System.Embedded.1"}]`
out, err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("bmh", "-n", machineAPINamespace, bmhName, "--type=json", "-p", patchConfig).Output()
o.Expect(err).To(o.HaveOccurred())
o.Expect(out).To(o.ContainSubstring("denied the request: BMC address can not be changed if the BMH is not in the Registering state, or if the BMH is not detached"))
g.By("Detach the BareMetal host")
patch := `{"metadata":{"annotations":{"baremetalhost.metal3.io/detached": ""}}}`
_, err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("bmh", "-n", machineAPINamespace, bmhName, "--type=merge", "-p", patch).Output()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Modify BMC address of BareMetal host")
_, err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("bmh", "-n", machineAPINamespace, bmhName, "--type=json", "-p", patchConfig).Output()
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
g.By("Revert changes")
patchConfig = fmt.Sprintf(`[{"op": "replace", "path": "/spec/bmc/address", "value": "%s"}]`, bmcAddressOrig)
_, err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("bmh", "-n", machineAPINamespace, bmhName, "--type=json", "-p", patchConfig).Output()
o.Expect(err).NotTo(o.HaveOccurred())
patchConfig = `[{"op": "remove", "path": "/metadata/annotations/baremetalhost.metal3.io~1detached"}]`
_, err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("bmh", "-n", machineAPINamespace, bmhName, "--type=json", "-p", patchConfig).Output()
o.Expect(err).NotTo(o.HaveOccurred())
}()
bmcAddress, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("baremetalhosts", "-n", machineAPINamespace, "-o=jsonpath={.items[4].spec.bmc.address}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(bmcAddress).To(o.ContainSubstring("redfish-virtualmedia://10.1.234.25/redfish/v1/Systems/System.Embedded.1"))
})
// author: [email protected]
g.It("Author:jhajyahy-Medium-66491-bootMACAddress can't be changed once set [Disruptive]", func() {
g.By("Running oc patch bmh -n openshift-machine-api master-00")
bmhName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("baremetalhosts", "-n", machineAPINamespace, "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
patchConfig := `[{"op": "replace", "path": "/spec/bootMACAddress", "value":"f4:02:70:b8:d8:ff"}]`
out, err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("bmh", "-n", machineAPINamespace, bmhName, "--type=json", "-p", patchConfig).Output()
o.Expect(err).To(o.HaveOccurred())
o.Expect(out).To(o.ContainSubstring("bootMACAddress can not be changed once it is set"))
})
// author: [email protected]
g.It("Author:jhajyahy-Longduration-NonPreRelease-Medium-74940-Root device hints should accept by-path device alias [Disruptive]", func() {
dirname = "OCP-74940.log"
bmhName, getBmhErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("bmh", "-n", machineAPINamespace, "-o=jsonpath={.items[4].metadata.name}").Output()
o.Expect(getBmhErr).NotTo(o.HaveOccurred(), "Failed to get bmh name")
baseDir := exutil.FixturePath("testdata", "installer")
bmhYaml := filepath.Join(baseDir, "baremetal", "bmh.yaml")
bmcAddress, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("bmh", "-n", machineAPINamespace, bmhName, "-o=jsonpath={.spec.bmc.address}").Output()
bootMACAddress, getBbootMACAddressErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("bmh", "-n", machineAPINamespace, bmhName, "-o=jsonpath={.spec.bootMACAddress}").Output()
o.Expect(getBbootMACAddressErr).NotTo(o.HaveOccurred(), "Failed to get bootMACAddress")
rootDeviceHints := getBypathDeviceName(oc, bmhName)
bmcSecretName, getBMHSecretErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("bmh", "-n", machineAPINamespace, bmhName, "-o=jsonpath={.spec.bmc.credentialsName}").Output()
o.Expect(getBMHSecretErr).NotTo(o.HaveOccurred(), "Failed to get bmh secret")
bmcSecretuser, getBmcUserErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", "-n", machineAPINamespace, bmcSecretName, "-o=jsonpath={.data.username}").Output()
o.Expect(getBmcUserErr).NotTo(o.HaveOccurred(), "Failed to get bmh secret user")
bmcSecretPass, getBmcPassErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", "-n", machineAPINamespace, bmcSecretName, "-o=jsonpath={.data.password}").Output()
o.Expect(getBmcPassErr).NotTo(o.HaveOccurred(), "Failed to get bmh secret password")
bmhSecretYaml := filepath.Join(baseDir, "baremetal", "bmh-secret.yaml")
defer os.Remove(bmhSecretYaml)
exutil.ModifyYamlFileContent(bmhSecretYaml, []exutil.YamlReplace{
{
Path: "data.username",
Value: bmcSecretuser,
},
{
Path: "data.password",
Value: bmcSecretPass,
},
{
Path: "metadata.name",
Value: bmcSecretName,
},
})
exutil.ModifyYamlFileContent(bmhYaml, []exutil.YamlReplace{
{
Path: "metadata.name",
Value: bmhName,
},
{
Path: "spec.bmc.address",
Value: bmcAddress,
},
{
Path: "spec.bootMACAddress",
Value: bootMACAddress,
},
{
Path: "spec.rootDeviceHints.deviceName",
Value: rootDeviceHints,
},
{
Path: "spec.bmc.credentialsName",
Value: bmcSecretName,
},
})
exutil.By("Get machine name of host")
machine, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("bmh", "-n", machineAPINamespace, bmhName, "-o=jsonpath={.spec.consumerRef.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// Get the origin number of replicas
machineSet, cmdErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("machineset", "-n", machineAPINamespace, "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(cmdErr).NotTo(o.HaveOccurred())
originReplicasStr, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("machineset", machineSet, "-n", machineAPINamespace, "-o=jsonpath={.spec.replicas}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Annotate worker-01 machine for deletion")
_, err = oc.AsAdmin().WithoutNamespace().Run("annotate").Args("machine", machine, "machine.openshift.io/cluster-api-delete-machine=yes", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Scale down machineset")
originReplicas, err := strconv.Atoi(originReplicasStr)
o.Expect(err).NotTo(o.HaveOccurred())
newReplicas := originReplicas - 1
_, err = oc.AsAdmin().WithoutNamespace().Run("scale").Args("machineset", machineSet, "-n", machineAPINamespace, fmt.Sprintf("--replicas=%d", newReplicas)).Output()
o.Expect(err).NotTo(o.HaveOccurred())
waitForBMHState(oc, bmhName, "available")
exutil.By("Delete worker node")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("bmh", "-n", machineAPINamespace, bmhName).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
currentReplicasStr, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("machineset", machineSet, "-n", machineAPINamespace, "-o=jsonpath={.spec.replicas}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// Only scale back if the new number of replicas is different from the original
if currentReplicasStr != originReplicasStr {
exutil.By("Create bmh secret using saved yaml file")
err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", bmhSecretYaml).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create bmh using saved yaml file")
err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", bmhYaml).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
_, err := oc.AsAdmin().WithoutNamespace().Run("scale").Args("machineset", machineSet, "-n", machineAPINamespace, fmt.Sprintf("--replicas=%s", originReplicasStr)).Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeHealthErr := clusterNodesHealthcheck(oc, 1500)
exutil.AssertWaitPollNoErr(nodeHealthErr, "Nodes do not recover healthy in time!")
clusterOperatorHealthcheckErr := clusterOperatorHealthcheck(oc, 1500, dirname)
exutil.AssertWaitPollNoErr(clusterOperatorHealthcheckErr, "Cluster operators do not recover healthy in time!")
}
}()
waitForBMHDeletion(oc, bmhName)
exutil.By("Create bmh secret using saved yaml file")
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", bmhSecretYaml).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create bmh using saved yaml file")
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", bmhYaml).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("scale").Args("machineset", machineSet, "-n", machineAPINamespace, fmt.Sprintf("--replicas=%s", originReplicasStr)).Output()
o.Expect(err).NotTo(o.HaveOccurred())
waitForBMHState(oc, bmhName, "provisioned")
nodeHealthErr := clusterNodesHealthcheck(oc, 1500)
exutil.AssertWaitPollNoErr(nodeHealthErr, "Nodes do not recover healthy in time!")
clusterOperatorHealthcheckErr := clusterOperatorHealthcheck(oc, 1500, dirname)
exutil.AssertWaitPollNoErr(clusterOperatorHealthcheckErr, "Cluster operators do not recover healthy in time!")
actualRootDeviceHints, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("bmh", "-n", machineAPINamespace, bmhName, "-o=jsonpath={.spec.rootDeviceHints.deviceName}").Output()
o.Expect(actualRootDeviceHints).Should(o.Equal(rootDeviceHints))
})
})
| package baremetal | ||||
test case | openshift/openshift-tests-private | e4fcedd1-f823-407d-9fa1-593d7930ddc6 | Author:jhajyahy-Medium-66490-Allow modification of BMC address after installation [Disruptive] | ['"fmt"'] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/bmo_validations.go | g.It("Author:jhajyahy-Medium-66490-Allow modification of BMC address after installation [Disruptive]", func() {
g.By("Running oc patch bmh -n openshift-machine-api master-00")
bmhName, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("baremetalhosts", "-n", machineAPINamespace, "-o=jsonpath={.items[4].metadata.name}").Output()
bmcAddressOrig, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("baremetalhosts", "-n", machineAPINamespace, "-o=jsonpath={.items[4].spec.bmc.address}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
patchConfig := `[{"op": "replace", "path": "/spec/bmc/address", "value":"redfish-virtualmedia://10.1.234.25/redfish/v1/Systems/System.Embedded.1"}]`
out, err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("bmh", "-n", machineAPINamespace, bmhName, "--type=json", "-p", patchConfig).Output()
o.Expect(err).To(o.HaveOccurred())
o.Expect(out).To(o.ContainSubstring("denied the request: BMC address can not be changed if the BMH is not in the Registering state, or if the BMH is not detached"))
g.By("Detach the BareMetal host")
patch := `{"metadata":{"annotations":{"baremetalhost.metal3.io/detached": ""}}}`
_, err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("bmh", "-n", machineAPINamespace, bmhName, "--type=merge", "-p", patch).Output()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Modify BMC address of BareMetal host")
_, err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("bmh", "-n", machineAPINamespace, bmhName, "--type=json", "-p", patchConfig).Output()
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
g.By("Revert changes")
patchConfig = fmt.Sprintf(`[{"op": "replace", "path": "/spec/bmc/address", "value": "%s"}]`, bmcAddressOrig)
_, err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("bmh", "-n", machineAPINamespace, bmhName, "--type=json", "-p", patchConfig).Output()
o.Expect(err).NotTo(o.HaveOccurred())
patchConfig = `[{"op": "remove", "path": "/metadata/annotations/baremetalhost.metal3.io~1detached"}]`
_, err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("bmh", "-n", machineAPINamespace, bmhName, "--type=json", "-p", patchConfig).Output()
o.Expect(err).NotTo(o.HaveOccurred())
}()
bmcAddress, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("baremetalhosts", "-n", machineAPINamespace, "-o=jsonpath={.items[4].spec.bmc.address}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(bmcAddress).To(o.ContainSubstring("redfish-virtualmedia://10.1.234.25/redfish/v1/Systems/System.Embedded.1"))
}) | |||||
test case | openshift/openshift-tests-private | bf413264-e490-4ff3-9c8b-dd1656fd8c97 | Author:jhajyahy-Medium-66491-bootMACAddress can't be changed once set [Disruptive] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/bmo_validations.go | g.It("Author:jhajyahy-Medium-66491-bootMACAddress can't be changed once set [Disruptive]", func() {
g.By("Running oc patch bmh -n openshift-machine-api master-00")
bmhName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("baremetalhosts", "-n", machineAPINamespace, "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
patchConfig := `[{"op": "replace", "path": "/spec/bootMACAddress", "value":"f4:02:70:b8:d8:ff"}]`
out, err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("bmh", "-n", machineAPINamespace, bmhName, "--type=json", "-p", patchConfig).Output()
o.Expect(err).To(o.HaveOccurred())
o.Expect(out).To(o.ContainSubstring("bootMACAddress can not be changed once it is set"))
}) | ||||||
test case | openshift/openshift-tests-private | fa9babbd-4d4e-4a35-bfd3-decd41682e60 | Author:jhajyahy-Longduration-NonPreRelease-Medium-74940-Root device hints should accept by-path device alias [Disruptive] | ['"fmt"', '"os"', '"path/filepath"', '"strconv"'] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/bmo_validations.go | g.It("Author:jhajyahy-Longduration-NonPreRelease-Medium-74940-Root device hints should accept by-path device alias [Disruptive]", func() {
dirname = "OCP-74940.log"
bmhName, getBmhErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("bmh", "-n", machineAPINamespace, "-o=jsonpath={.items[4].metadata.name}").Output()
o.Expect(getBmhErr).NotTo(o.HaveOccurred(), "Failed to get bmh name")
baseDir := exutil.FixturePath("testdata", "installer")
bmhYaml := filepath.Join(baseDir, "baremetal", "bmh.yaml")
bmcAddress, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("bmh", "-n", machineAPINamespace, bmhName, "-o=jsonpath={.spec.bmc.address}").Output()
bootMACAddress, getBbootMACAddressErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("bmh", "-n", machineAPINamespace, bmhName, "-o=jsonpath={.spec.bootMACAddress}").Output()
o.Expect(getBbootMACAddressErr).NotTo(o.HaveOccurred(), "Failed to get bootMACAddress")
rootDeviceHints := getBypathDeviceName(oc, bmhName)
bmcSecretName, getBMHSecretErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("bmh", "-n", machineAPINamespace, bmhName, "-o=jsonpath={.spec.bmc.credentialsName}").Output()
o.Expect(getBMHSecretErr).NotTo(o.HaveOccurred(), "Failed to get bmh secret")
bmcSecretuser, getBmcUserErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", "-n", machineAPINamespace, bmcSecretName, "-o=jsonpath={.data.username}").Output()
o.Expect(getBmcUserErr).NotTo(o.HaveOccurred(), "Failed to get bmh secret user")
bmcSecretPass, getBmcPassErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", "-n", machineAPINamespace, bmcSecretName, "-o=jsonpath={.data.password}").Output()
o.Expect(getBmcPassErr).NotTo(o.HaveOccurred(), "Failed to get bmh secret password")
bmhSecretYaml := filepath.Join(baseDir, "baremetal", "bmh-secret.yaml")
defer os.Remove(bmhSecretYaml)
exutil.ModifyYamlFileContent(bmhSecretYaml, []exutil.YamlReplace{
{
Path: "data.username",
Value: bmcSecretuser,
},
{
Path: "data.password",
Value: bmcSecretPass,
},
{
Path: "metadata.name",
Value: bmcSecretName,
},
})
exutil.ModifyYamlFileContent(bmhYaml, []exutil.YamlReplace{
{
Path: "metadata.name",
Value: bmhName,
},
{
Path: "spec.bmc.address",
Value: bmcAddress,
},
{
Path: "spec.bootMACAddress",
Value: bootMACAddress,
},
{
Path: "spec.rootDeviceHints.deviceName",
Value: rootDeviceHints,
},
{
Path: "spec.bmc.credentialsName",
Value: bmcSecretName,
},
})
exutil.By("Get machine name of host")
machine, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("bmh", "-n", machineAPINamespace, bmhName, "-o=jsonpath={.spec.consumerRef.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// Get the origin number of replicas
machineSet, cmdErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("machineset", "-n", machineAPINamespace, "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(cmdErr).NotTo(o.HaveOccurred())
originReplicasStr, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("machineset", machineSet, "-n", machineAPINamespace, "-o=jsonpath={.spec.replicas}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Annotate worker-01 machine for deletion")
_, err = oc.AsAdmin().WithoutNamespace().Run("annotate").Args("machine", machine, "machine.openshift.io/cluster-api-delete-machine=yes", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Scale down machineset")
originReplicas, err := strconv.Atoi(originReplicasStr)
o.Expect(err).NotTo(o.HaveOccurred())
newReplicas := originReplicas - 1
_, err = oc.AsAdmin().WithoutNamespace().Run("scale").Args("machineset", machineSet, "-n", machineAPINamespace, fmt.Sprintf("--replicas=%d", newReplicas)).Output()
o.Expect(err).NotTo(o.HaveOccurred())
waitForBMHState(oc, bmhName, "available")
exutil.By("Delete worker node")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("bmh", "-n", machineAPINamespace, bmhName).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
currentReplicasStr, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("machineset", machineSet, "-n", machineAPINamespace, "-o=jsonpath={.spec.replicas}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// Only scale back if the new number of replicas is different from the original
if currentReplicasStr != originReplicasStr {
exutil.By("Create bmh secret using saved yaml file")
err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", bmhSecretYaml).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create bmh using saved yaml file")
err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", bmhYaml).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
_, err := oc.AsAdmin().WithoutNamespace().Run("scale").Args("machineset", machineSet, "-n", machineAPINamespace, fmt.Sprintf("--replicas=%s", originReplicasStr)).Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeHealthErr := clusterNodesHealthcheck(oc, 1500)
exutil.AssertWaitPollNoErr(nodeHealthErr, "Nodes do not recover healthy in time!")
clusterOperatorHealthcheckErr := clusterOperatorHealthcheck(oc, 1500, dirname)
exutil.AssertWaitPollNoErr(clusterOperatorHealthcheckErr, "Cluster operators do not recover healthy in time!")
}
}()
waitForBMHDeletion(oc, bmhName)
exutil.By("Create bmh secret using saved yaml file")
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", bmhSecretYaml).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create bmh using saved yaml file")
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", bmhYaml).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("scale").Args("machineset", machineSet, "-n", machineAPINamespace, fmt.Sprintf("--replicas=%s", originReplicasStr)).Output()
o.Expect(err).NotTo(o.HaveOccurred())
waitForBMHState(oc, bmhName, "provisioned")
nodeHealthErr := clusterNodesHealthcheck(oc, 1500)
exutil.AssertWaitPollNoErr(nodeHealthErr, "Nodes do not recover healthy in time!")
clusterOperatorHealthcheckErr := clusterOperatorHealthcheck(oc, 1500, dirname)
exutil.AssertWaitPollNoErr(clusterOperatorHealthcheckErr, "Cluster operators do not recover healthy in time!")
actualRootDeviceHints, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("bmh", "-n", machineAPINamespace, bmhName, "-o=jsonpath={.spec.rootDeviceHints.deviceName}").Output()
o.Expect(actualRootDeviceHints).Should(o.Equal(rootDeviceHints))
}) | |||||
test | openshift/openshift-tests-private | 140c2159-8bf6-4d42-88c9-c794d8b1267f | cbo | import (
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/cbo.go | package baremetal
import (
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
// var _ = g.Describe("[sig-baremetal] INSTALLER UPI for INSTALLER_GENERAL job on BareMetal", func() {
// defer g.GinkgoRecover()
// var (
// oc = exutil.NewCLI("cluster-baremetal-operator", exutil.KubeConfigPath())
// )
// g.BeforeEach(func() {
// })
// g.AfterEach(func() {
// })
// // author: [email protected]
// g.It("Author:sgoveas--Medium-12345-example case", func() {
// })
// })
// var _ = g.Describe("[sig-baremetal] INSTALLER UPI for INSTALLER_DEDICATED job on BareMetal", func() {
// defer g.GinkgoRecover()
// var (
// oc = exutil.NewCLI("cluster-baremetal-operator", exutil.KubeConfigPath())
// )
// g.BeforeEach(func() {
// })
// g.AfterEach(func() {
// })
// // author: [email protected]
// g.It("Author:sgoveas--Medium-12345-example case", func() {
// })
// })
var _ = g.Describe("[sig-baremetal] INSTALLER IPI for INSTALLER_GENERAL job on BareMetal", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("cluster-baremetal-operator", exutil.KubeConfigPath())
iaasPlatform string
)
g.BeforeEach(func() {
exutil.SkipForSNOCluster(oc)
iaasPlatform = exutil.CheckPlatform(oc)
if !(iaasPlatform == "baremetal") {
e2e.Logf("Cluster is: %s", iaasPlatform)
g.Skip("For Non-baremetal cluster , this is not supported!")
}
})
// author: [email protected]
g.It("Author:jhajyahy-Medium-33516-Verify that cluster baremetal operator is active", func() {
g.By("Running oc get clusteroperators baremetal")
status, err := checkOperator(oc, "baremetal")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(status).To(o.BeTrue())
g.By("Run oc describe clusteroperators baremetal")
output, err := oc.AsAdmin().Run("get").Args("clusteroperator", "baremetal").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).ShouldNot(o.BeEmpty())
})
// author: [email protected]
g.It("Author:jhajyahy-Medium-36446-Verify openshift-machine-api namespace is still there and Ready", func() {
g.By("Running oc get project openshift-machine-api")
nsStatus, err := oc.AsAdmin().Run("get").Args("project", machineAPINamespace, "-o=jsonpath={.status.phase}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nsStatus).Should(o.Equal("Active"))
})
// author: [email protected]
g.It("Author:jhajyahy-Medium-36909-Verify metal3 pod is controlled by cluster baremetal operator", func() {
g.By("Running oc get deployment -n openshift-machine-api")
annotations, err := oc.AsAdmin().Run("get").Args("deployment", "-n", machineAPINamespace, "metal3", "-o=jsonpath={.metadata.annotations}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(annotations).Should(o.ContainSubstring("baremetal.openshift.io/owned"))
})
// author: [email protected]
g.It("Author:jhajyahy-Medium-36445-Verify new additions to openshift-machine-api project", func() {
g.By("Running oc get serviceaccount -n openshift-machine-api cluster-baremetal-operator")
serviceAccount, err := oc.AsAdmin().Run("get").Args("serviceaccount", "-n", machineAPINamespace, "cluster-baremetal-operator", "-o=jsonpath={.metadata.name}:{.kind}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(serviceAccount).Should(o.Equal("cluster-baremetal-operator:ServiceAccount"))
g.By("Running oc get provisioning provisioning-configuration")
prov, err := oc.AsAdmin().Run("get").Args("provisioning", "provisioning-configuration", "-o=jsonpath={.metadata.name}:{.kind}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(prov).Should(o.Equal("provisioning-configuration:Provisioning"))
g.By("Running oc get deploy -n openshift-machine-api metal3")
priority, err := oc.AsAdmin().Run("get").Args("deployment", "-n", machineAPINamespace, "metal3", "-o=jsonpath={.spec.template.spec.priorityClassName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(priority).Should(o.Equal("system-node-critical"))
})
})
var _ = g.Describe("[sig-baremetal] INSTALLER IPI for INSTALLER_DEDICATED job on BareMetal", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("cluster-baremetal-operator", exutil.KubeConfigPath())
iaasPlatform string
)
g.BeforeEach(func() {
exutil.SkipForSNOCluster(oc)
iaasPlatform = exutil.CheckPlatform(oc)
if !(iaasPlatform == "baremetal") {
e2e.Logf("Cluster is: %s", iaasPlatform)
g.Skip("For Non-baremetal cluster , this is not supported!")
}
})
g.It("Author:jhajyahy-Medium-38155-Verify when deleting the Provisioning CR, the associated resources are deleted[Serial]", func() {
g.By("Save provisioning-configuration as yaml file")
filePath, err := oc.AsAdmin().Run("get").Args("provisioning", "provisioning-configuration", "-o=yaml").OutputToFile("prov.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
err := oc.AsAdmin().Run("get").Args("provisioning", "provisioning-configuration").Execute()
if err != nil {
errApply := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", filePath).Execute()
o.Expect(errApply).NotTo(o.HaveOccurred())
waitForDeployStatus(oc, "metal3", machineAPINamespace, "True")
cboStatus, err := checkOperator(oc, "baremetal")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(cboStatus).To(o.BeTrue())
}
}()
g.By("Delete provisioning-configuration")
deleteErr := oc.AsAdmin().Run("delete").Args("provisioning", "provisioning-configuration").Execute()
o.Expect(deleteErr).NotTo(o.HaveOccurred())
waitForPodNotFound(oc, "metal3", machineAPINamespace)
g.By("Check metal3 pods, services, secrets and deployment are deleted")
secrets, secretErr := oc.AsAdmin().Run("get").Args("secrets", "-n", machineAPINamespace, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(secretErr).NotTo(o.HaveOccurred())
o.Expect(secrets).ShouldNot(o.ContainSubstring("metal3"))
allResources, allErr := oc.AsAdmin().Run("get").Args("all", "-n", machineAPINamespace, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(allErr).NotTo(o.HaveOccurred())
o.Expect(allResources).ShouldNot(o.ContainSubstring("metal3"))
g.By("Check cluster baremetal operator still available")
status, err := checkOperator(oc, "baremetal")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(status).To(o.BeTrue())
g.By("Recreate provisioning-configuration")
createErr := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", filePath).Execute()
o.Expect(createErr).NotTo(o.HaveOccurred())
g.By("Check metal3 pods, services, secrets and deployment are recreated")
waitForDeployStatus(oc, "metal3", machineAPINamespace, "True")
metal3Secrets, secretErr := oc.AsAdmin().Run("get").Args("secrets", "-n", machineAPINamespace, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(secretErr).NotTo(o.HaveOccurred())
o.Expect(metal3Secrets).Should(o.ContainSubstring("metal3"))
pods, err := oc.AsAdmin().Run("get").Args("pods", "-n", machineAPINamespace, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
podlist := strings.Fields(pods)
for _, pod := range podlist {
podStatus := getPodStatus(oc, machineAPINamespace, pod)
o.Expect(podStatus).Should(o.Equal("Running"))
}
g.By("Check cluster baremetal operator is available")
cboStatus, err := checkOperator(oc, "baremetal")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(cboStatus).To(o.BeTrue())
})
})
| package baremetal | ||||
test case | openshift/openshift-tests-private | 0e92cd96-e76c-449a-b4d0-9324827dca78 | Author:jhajyahy-Medium-33516-Verify that cluster baremetal operator is active | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/cbo.go | g.It("Author:jhajyahy-Medium-33516-Verify that cluster baremetal operator is active", func() {
g.By("Running oc get clusteroperators baremetal")
status, err := checkOperator(oc, "baremetal")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(status).To(o.BeTrue())
g.By("Run oc describe clusteroperators baremetal")
output, err := oc.AsAdmin().Run("get").Args("clusteroperator", "baremetal").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(output).ShouldNot(o.BeEmpty())
}) | ||||||
test case | openshift/openshift-tests-private | 62f578a1-ad3f-4a64-8981-c6928d45c7de | Author:jhajyahy-Medium-36446-Verify openshift-machine-api namespace is still there and Ready | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/cbo.go | g.It("Author:jhajyahy-Medium-36446-Verify openshift-machine-api namespace is still there and Ready", func() {
g.By("Running oc get project openshift-machine-api")
nsStatus, err := oc.AsAdmin().Run("get").Args("project", machineAPINamespace, "-o=jsonpath={.status.phase}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nsStatus).Should(o.Equal("Active"))
}) | ||||||
test case | openshift/openshift-tests-private | 755ff722-4d4d-4e55-bcca-94616f819976 | Author:jhajyahy-Medium-36909-Verify metal3 pod is controlled by cluster baremetal operator | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/cbo.go | g.It("Author:jhajyahy-Medium-36909-Verify metal3 pod is controlled by cluster baremetal operator", func() {
g.By("Running oc get deployment -n openshift-machine-api")
annotations, err := oc.AsAdmin().Run("get").Args("deployment", "-n", machineAPINamespace, "metal3", "-o=jsonpath={.metadata.annotations}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(annotations).Should(o.ContainSubstring("baremetal.openshift.io/owned"))
}) | ||||||
test case | openshift/openshift-tests-private | e46a5703-1b6d-4053-84b7-ea809e7022e5 | Author:jhajyahy-Medium-36445-Verify new additions to openshift-machine-api project | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/cbo.go | g.It("Author:jhajyahy-Medium-36445-Verify new additions to openshift-machine-api project", func() {
g.By("Running oc get serviceaccount -n openshift-machine-api cluster-baremetal-operator")
serviceAccount, err := oc.AsAdmin().Run("get").Args("serviceaccount", "-n", machineAPINamespace, "cluster-baremetal-operator", "-o=jsonpath={.metadata.name}:{.kind}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(serviceAccount).Should(o.Equal("cluster-baremetal-operator:ServiceAccount"))
g.By("Running oc get provisioning provisioning-configuration")
prov, err := oc.AsAdmin().Run("get").Args("provisioning", "provisioning-configuration", "-o=jsonpath={.metadata.name}:{.kind}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(prov).Should(o.Equal("provisioning-configuration:Provisioning"))
g.By("Running oc get deploy -n openshift-machine-api metal3")
priority, err := oc.AsAdmin().Run("get").Args("deployment", "-n", machineAPINamespace, "metal3", "-o=jsonpath={.spec.template.spec.priorityClassName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(priority).Should(o.Equal("system-node-critical"))
}) | ||||||
test case | openshift/openshift-tests-private | 496d5e26-0881-4ada-ad34-e6962df52293 | Author:jhajyahy-Medium-38155-Verify when deleting the Provisioning CR, the associated resources are deleted[Serial] | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/cbo.go | g.It("Author:jhajyahy-Medium-38155-Verify when deleting the Provisioning CR, the associated resources are deleted[Serial]", func() {
g.By("Save provisioning-configuration as yaml file")
filePath, err := oc.AsAdmin().Run("get").Args("provisioning", "provisioning-configuration", "-o=yaml").OutputToFile("prov.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
err := oc.AsAdmin().Run("get").Args("provisioning", "provisioning-configuration").Execute()
if err != nil {
errApply := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", filePath).Execute()
o.Expect(errApply).NotTo(o.HaveOccurred())
waitForDeployStatus(oc, "metal3", machineAPINamespace, "True")
cboStatus, err := checkOperator(oc, "baremetal")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(cboStatus).To(o.BeTrue())
}
}()
g.By("Delete provisioning-configuration")
deleteErr := oc.AsAdmin().Run("delete").Args("provisioning", "provisioning-configuration").Execute()
o.Expect(deleteErr).NotTo(o.HaveOccurred())
waitForPodNotFound(oc, "metal3", machineAPINamespace)
g.By("Check metal3 pods, services, secrets and deployment are deleted")
secrets, secretErr := oc.AsAdmin().Run("get").Args("secrets", "-n", machineAPINamespace, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(secretErr).NotTo(o.HaveOccurred())
o.Expect(secrets).ShouldNot(o.ContainSubstring("metal3"))
allResources, allErr := oc.AsAdmin().Run("get").Args("all", "-n", machineAPINamespace, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(allErr).NotTo(o.HaveOccurred())
o.Expect(allResources).ShouldNot(o.ContainSubstring("metal3"))
g.By("Check cluster baremetal operator still available")
status, err := checkOperator(oc, "baremetal")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(status).To(o.BeTrue())
g.By("Recreate provisioning-configuration")
createErr := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", filePath).Execute()
o.Expect(createErr).NotTo(o.HaveOccurred())
g.By("Check metal3 pods, services, secrets and deployment are recreated")
waitForDeployStatus(oc, "metal3", machineAPINamespace, "True")
metal3Secrets, secretErr := oc.AsAdmin().Run("get").Args("secrets", "-n", machineAPINamespace, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(secretErr).NotTo(o.HaveOccurred())
o.Expect(metal3Secrets).Should(o.ContainSubstring("metal3"))
pods, err := oc.AsAdmin().Run("get").Args("pods", "-n", machineAPINamespace, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
podlist := strings.Fields(pods)
for _, pod := range podlist {
podStatus := getPodStatus(oc, machineAPINamespace, pod)
o.Expect(podStatus).Should(o.Equal("Running"))
}
g.By("Check cluster baremetal operator is available")
cboStatus, err := checkOperator(oc, "baremetal")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(cboStatus).To(o.BeTrue())
}) | |||||
test | openshift/openshift-tests-private | 1c667bf7-c1fa-406a-a6bb-ede71701c423 | deployment_sanity | import (
"fmt"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/deployment_sanity.go | package baremetal
import (
"fmt"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
// var _ = g.Describe("[sig-baremetal] INSTALLER UPI for INSTALLER_GENERAL job on BareMetal", func() {
// defer g.GinkgoRecover()
// var (
// oc = exutil.NewCLI("baremetal-deployment-sanity", exutil.KubeConfigPath())
// )
// g.BeforeEach(func() {
// })
// g.AfterEach(func() {
// })
// // author: [email protected]
// g.It("Author:sgoveas--Medium-12345-example case", func() {
// })
// })
// var _ = g.Describe("[sig-baremetal] INSTALLER UPI for INSTALLER_DEDICATED job on BareMetal", func() {
// defer g.GinkgoRecover()
// var (
// oc = exutil.NewCLI("baremetal-deployment-sanity", exutil.KubeConfigPath())
// )
// g.BeforeEach(func() {
// })
// g.AfterEach(func() {
// })
// // author: [email protected]
// g.It("Author:sgoveas--Medium-12345-example case", func() {
// })
// })
var _ = g.Describe("[sig-baremetal] INSTALLER IPI for INSTALLER_GENERAL job on BareMetal", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("baremetal-deployment-sanity", exutil.KubeConfigPath())
iaasPlatform string
)
g.BeforeEach(func() {
exutil.SkipForSNOCluster(oc)
iaasPlatform = exutil.CheckPlatform(oc)
if !(iaasPlatform == "baremetal") {
e2e.Logf("Cluster is: %s", iaasPlatform)
g.Skip("For Non-baremetal cluster , this is not supported!")
}
})
// author: [email protected]
g.It("Author:jhajyahy-Medium-29146-Verify that all clusteroperators are Available", func() {
g.By("Running oc get clusteroperators")
res, _ := checkOperatorsRunning(oc)
o.Expect(res).To(o.BeTrue())
})
// author: [email protected]
g.It("Author:jhajyahy-Medium-29719-Verify that all nodes are up and running", func() {
g.By("Running oc get nodes")
res, _ := checkNodesRunning(oc)
o.Expect(res).To(o.BeTrue())
})
// author: [email protected]
g.It("Author:jhajyahy-Medium-32361-Verify that deployment exists and is not empty", func() {
g.By("Create new namespace")
oc.SetupProject()
ns32361 := oc.Namespace()
g.By("Create deployment")
deployCreationErr := oc.Run("create").Args("deployment", "deploy32361", "-n", ns32361, "--image", "quay.io/openshifttest/hello-openshift@sha256:4200f438cf2e9446f6bcff9d67ceea1f69ed07a2f83363b7fb52529f7ddd8a83").Execute()
o.Expect(deployCreationErr).NotTo(o.HaveOccurred())
g.By("Check deployment status is available")
waitForDeployStatus(oc, "deploy32361", ns32361, "True")
status, err := oc.AsAdmin().Run("get").Args("deployment", "-n", ns32361, "deploy32361", "-o=jsonpath={.status.conditions[?(@.type=='Available')].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\nDeployment %s Status is %s\n", "deploy32361", status)
o.Expect(status).To(o.Equal("True"))
g.By("Check pod is in Running state")
podName := getPodName(oc, ns32361)
podStatus := getPodStatus(oc, ns32361, podName)
o.Expect(podStatus).To(o.Equal("Running"))
})
// author: [email protected]
g.It("Author:jhajyahy-Medium-34195-Verify all pods replicas are running on workers only", func() {
g.By("Create new namespace")
oc.SetupProject()
ns34195 := oc.Namespace()
g.By("Create deployment with num of workers + 1 replicas")
workerNodes, err := exutil.GetClusterNodesBy(oc, "worker")
o.Expect(err).NotTo(o.HaveOccurred())
replicasNum := len(workerNodes) + 1
deployCreationErr := oc.Run("create").Args("deployment", "deploy34195", "-n", ns34195, fmt.Sprintf("--replicas=%d", replicasNum), "--image", "quay.io/openshifttest/hello-openshift@sha256:4200f438cf2e9446f6bcff9d67ceea1f69ed07a2f83363b7fb52529f7ddd8a83").Execute()
o.Expect(deployCreationErr).NotTo(o.HaveOccurred())
waitForDeployStatus(oc, "deploy34195", ns34195, "True")
g.By("Check deployed pods number is as expected")
pods, err := oc.AsAdmin().Run("get").Args("pods", "-n", ns34195, "--field-selector=status.phase=Running", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
podList := strings.Fields(pods)
o.Expect(len(podList)).To(o.Equal(replicasNum))
g.By("Check pods are deployed on worker nodes only")
for _, pod := range podList {
podNodeName, err := exutil.GetPodNodeName(oc, ns34195, pod)
o.Expect(err).NotTo(o.HaveOccurred())
res := exutil.IsWorkerNode(oc, podNodeName)
if !res {
e2e.Logf("\nPod %s was deployed on non worker node %s\n", pod, podNodeName)
}
o.Expect(res).To(o.BeTrue())
}
})
// author: [email protected]
g.It("Author:jhajyahy-Medium-39126-Verify maximum CPU usage limit hasn't reached on each of the nodes", func() {
g.By("Running oc get nodes")
cpuExceededNodes := []string{}
sampling_time, err := getClusterUptime(oc)
o.Expect(err).NotTo(o.HaveOccurred())
nodeNames, nodeErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(nodeErr).NotTo(o.HaveOccurred(), "Failed to execute oc get nodes")
nodes := strings.Fields(nodeNames)
for _, node := range nodes {
cpuUsage := getNodeCpuUsage(oc, node, sampling_time)
if cpuUsage > maxCpuUsageAllowed {
cpuExceededNodes = append(cpuExceededNodes, node)
e2e.Logf("\ncpu usage of exceeded node: %s is %.2f%%", node, cpuUsage)
}
}
o.Expect(cpuExceededNodes).Should(o.BeEmpty(), "These nodes exceed max CPU usage allowed: %s", cpuExceededNodes)
})
// author: [email protected]
g.It("Author:jhajyahy-Medium-39125-Verify that every node memory is sufficient", func() {
g.By("Running oc get nodes")
outOfMemoryNodes := []string{}
nodeNames, nodeErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(nodeErr).NotTo(o.HaveOccurred(), "Failed to execute oc get nodes")
nodes := strings.Fields(nodeNames)
for _, node := range nodes {
availMem := getNodeavailMem(oc, node)
e2e.Logf("\nAvailable mem of Node %s is %d", node, availMem)
if availMem < minRequiredMemoryInBytes {
outOfMemoryNodes = append(outOfMemoryNodes, node)
e2e.Logf("\nNode %s does not meet minimum required memory %s Bytes ", node, minRequiredMemoryInBytes)
}
}
o.Expect(outOfMemoryNodes).Should(o.BeEmpty(), "These nodes do not meet the minimum required memory: %s", outOfMemoryNodes)
})
})
// var _ = g.Describe("[sig-baremetal] INSTALLER IPI for INSTALLER_DEDICATED job on BareMetal", func() {
// defer g.GinkgoRecover()
// var (
// oc = exutil.NewCLI("baremetal-deployment-sanity", exutil.KubeConfigPath())
// )
// g.BeforeEach(func() {
// })
// g.AfterEach(func() {
// })
// // author: [email protected]
// g.It("Author:sgoveas--Medium-12345-example case", func() {
// })
// })
| package baremetal | ||||
test case | openshift/openshift-tests-private | f7f5db4c-8471-4bc6-a441-9845dae5a1d8 | Author:jhajyahy-Medium-29146-Verify that all clusteroperators are Available | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/deployment_sanity.go | g.It("Author:jhajyahy-Medium-29146-Verify that all clusteroperators are Available", func() {
g.By("Running oc get clusteroperators")
res, _ := checkOperatorsRunning(oc)
o.Expect(res).To(o.BeTrue())
}) | ||||||
test case | openshift/openshift-tests-private | 744bdbe6-2752-40f5-8256-44eee59930ca | Author:jhajyahy-Medium-29719-Verify that all nodes are up and running | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/deployment_sanity.go | g.It("Author:jhajyahy-Medium-29719-Verify that all nodes are up and running", func() {
g.By("Running oc get nodes")
res, _ := checkNodesRunning(oc)
o.Expect(res).To(o.BeTrue())
}) | ||||||
test case | openshift/openshift-tests-private | 5cc7312b-fa19-4d34-a603-dc666951a0bd | Author:jhajyahy-Medium-32361-Verify that deployment exists and is not empty | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/deployment_sanity.go | g.It("Author:jhajyahy-Medium-32361-Verify that deployment exists and is not empty", func() {
g.By("Create new namespace")
oc.SetupProject()
ns32361 := oc.Namespace()
g.By("Create deployment")
deployCreationErr := oc.Run("create").Args("deployment", "deploy32361", "-n", ns32361, "--image", "quay.io/openshifttest/hello-openshift@sha256:4200f438cf2e9446f6bcff9d67ceea1f69ed07a2f83363b7fb52529f7ddd8a83").Execute()
o.Expect(deployCreationErr).NotTo(o.HaveOccurred())
g.By("Check deployment status is available")
waitForDeployStatus(oc, "deploy32361", ns32361, "True")
status, err := oc.AsAdmin().Run("get").Args("deployment", "-n", ns32361, "deploy32361", "-o=jsonpath={.status.conditions[?(@.type=='Available')].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\nDeployment %s Status is %s\n", "deploy32361", status)
o.Expect(status).To(o.Equal("True"))
g.By("Check pod is in Running state")
podName := getPodName(oc, ns32361)
podStatus := getPodStatus(oc, ns32361, podName)
o.Expect(podStatus).To(o.Equal("Running"))
}) | ||||||
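waitForDeployStatus, getPodName and getPodStatus are package helpers that are not part of this record; a minimal sketch of the polling pattern waitForDeployStatus presumably follows (hypothetical body, assuming it simply polls the deployment's Available condition until it matches) is shown below.

// Hypothetical sketch of the deployment polling helper used above.
package baremetal

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"

	exutil "github.com/openshift/openshift-tests-private/test/extended/util"
	e2e "k8s.io/kubernetes/test/e2e/framework"
)

func waitForDeployStatusSketch(oc *exutil.CLI, name, namespace, expected string) {
	err := wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
		status, getErr := oc.AsAdmin().Run("get").Args("deployment", name, "-n", namespace,
			"-o=jsonpath={.status.conditions[?(@.type=='Available')].status}").Output()
		if getErr != nil {
			e2e.Logf("could not read deployment %s status: %v, retrying", name, getErr)
			return false, nil
		}
		return status == expected, nil
	})
	exutil.AssertWaitPollNoErr(err, fmt.Sprintf("deployment %s/%s never reported Available=%s", namespace, name, expected))
}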
test case | openshift/openshift-tests-private | 14bc0620-8ee6-4777-bc46-5bad358d1c24 | Author:jhajyahy-Medium-34195-Verify all pods replicas are running on workers only | ['"fmt"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/deployment_sanity.go | g.It("Author:jhajyahy-Medium-34195-Verify all pods replicas are running on workers only", func() {
g.By("Create new namespace")
oc.SetupProject()
ns34195 := oc.Namespace()
g.By("Create deployment with num of workers + 1 replicas")
workerNodes, err := exutil.GetClusterNodesBy(oc, "worker")
o.Expect(err).NotTo(o.HaveOccurred())
replicasNum := len(workerNodes) + 1
deployCreationErr := oc.Run("create").Args("deployment", "deploy34195", "-n", ns34195, fmt.Sprintf("--replicas=%d", replicasNum), "--image", "quay.io/openshifttest/hello-openshift@sha256:4200f438cf2e9446f6bcff9d67ceea1f69ed07a2f83363b7fb52529f7ddd8a83").Execute()
o.Expect(deployCreationErr).NotTo(o.HaveOccurred())
waitForDeployStatus(oc, "deploy34195", ns34195, "True")
g.By("Check deployed pods number is as expected")
pods, err := oc.AsAdmin().Run("get").Args("pods", "-n", ns34195, "--field-selector=status.phase=Running", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
podList := strings.Fields(pods)
o.Expect(len(podList)).To(o.Equal(replicasNum))
g.By("Check pods are deployed on worker nodes only")
for _, pod := range podList {
podNodeName, err := exutil.GetPodNodeName(oc, ns34195, pod)
o.Expect(err).NotTo(o.HaveOccurred())
res := exutil.IsWorkerNode(oc, podNodeName)
if !res {
e2e.Logf("\nPod %s was deployed on non worker node %s\n", pod, podNodeName)
}
o.Expect(res).To(o.BeTrue())
}
}) | |||||
test case | openshift/openshift-tests-private | 72d394d1-7f2b-4c46-b76a-5ec28867bfb5 | Author:jhajyahy-Medium-39126-Verify maximum CPU usage limit hasn't reached on each of the nodes | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/deployment_sanity.go | g.It("Author:jhajyahy-Medium-39126-Verify maximum CPU usage limit hasn't reached on each of the nodes", func() {
g.By("Running oc get nodes")
cpuExceededNodes := []string{}
sampling_time, err := getClusterUptime(oc)
o.Expect(err).NotTo(o.HaveOccurred())
nodeNames, nodeErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(nodeErr).NotTo(o.HaveOccurred(), "Failed to execute oc get nodes")
nodes := strings.Fields(nodeNames)
for _, node := range nodes {
cpuUsage := getNodeCpuUsage(oc, node, sampling_time)
if cpuUsage > maxCpuUsageAllowed {
cpuExceededNodes = append(cpuExceededNodes, node)
e2e.Logf("\ncpu usage of exceeded node: %s is %.2f%%", node, cpuUsage)
}
}
o.Expect(cpuExceededNodes).Should(o.BeEmpty(), "These nodes exceed max CPU usage allowed: %s", cpuExceededNodes)
}) | |||||
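getClusterUptime and getNodeCpuUsage are helpers defined elsewhere in the package (the real implementation may sample CPU usage over the uptime window, e.g. via Prometheus); as a hedged alternative, the instantaneous CPU percentage reported by 'oc adm top node' can be parsed like this.

// Hedged sketch: reads the CPU% column of `oc adm top node <node>`; the repository's
// getNodeCpuUsage helper may use a different data source.
package baremetal

import (
	"fmt"
	"strconv"
	"strings"

	exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)

func nodeCpuPercentSketch(oc *exutil.CLI, node string) (float64, error) {
	out, err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("top", "node", node, "--no-headers").Output()
	if err != nil {
		return 0, err
	}
	// Columns: NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
	fields := strings.Fields(out)
	if len(fields) < 3 {
		return 0, fmt.Errorf("unexpected `oc adm top node` output: %q", out)
	}
	return strconv.ParseFloat(strings.TrimSuffix(fields[2], "%"), 64)
}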
test case | openshift/openshift-tests-private | 5a86fb5d-be34-402a-b85c-b2e1f6990e41 | Author:jhajyahy-Medium-39125-Verify that every node memory is sufficient | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/deployment_sanity.go | g.It("Author:jhajyahy-Medium-39125-Verify that every node memory is sufficient", func() {
g.By("Running oc get nodes")
outOfMemoryNodes := []string{}
nodeNames, nodeErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(nodeErr).NotTo(o.HaveOccurred(), "Failed to execute oc get nodes")
nodes := strings.Fields(nodeNames)
for _, node := range nodes {
availMem := getNodeavailMem(oc, node)
e2e.Logf("\nAvailable mem of Node %s is %d", node, availMem)
if availMem < minRequiredMemoryInBytes {
outOfMemoryNodes = append(outOfMemoryNodes, node)
e2e.Logf("\nNode %s does not meet minimum required memory %s Bytes ", node, minRequiredMemoryInBytes)
}
}
o.Expect(outOfMemoryNodes).Should(o.BeEmpty(), "These nodes do not meet the minimum required memory: %s", outOfMemoryNodes)
}) | |||||
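getNodeavailMem is likewise a package helper; a hedged sketch that reads MemAvailable from /proc/meminfo through the same debug-node utility these tests already use (the real helper may gather this differently) could look like this.

// Hedged sketch only: the repository's getNodeavailMem helper may be implemented differently.
package baremetal

import (
	"fmt"
	"strconv"
	"strings"

	exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)

func nodeAvailMemBytesSketch(oc *exutil.CLI, node string) (int64, error) {
	out, err := exutil.DebugNodeWithChroot(oc, node, "bash", "-c", "awk '/MemAvailable/ {print $2}' /proc/meminfo")
	if err != nil {
		return 0, err
	}
	// The debug output contains extra pod lines; keep the first purely numeric field.
	for _, field := range strings.Fields(out) {
		if kib, convErr := strconv.ParseInt(field, 10, 64); convErr == nil {
			return kib * 1024, nil // /proc/meminfo reports kB
		}
	}
	return 0, fmt.Errorf("MemAvailable not found in debug output: %q", out)
}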
test | openshift/openshift-tests-private | 2209588d-d120-47f0-a939-3b55ed9140de | additionalNTPServers | import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/additionalNTPServers.go | package baremetal
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
// var _ = g.Describe("[sig-baremetal] INSTALLER UPI for INSTALLER_GENERAL job on BareMetal", func() {
// defer g.GinkgoRecover()
// var (
// oc = exutil.NewCLI("cluster-baremetal-operator", exutil.KubeConfigPath())
// )
// g.BeforeEach(func() {
// })
// g.AfterEach(func() {
// })
// // author: [email protected]
// g.It("Author:sgoveas--Medium-12345-example case", func() {
// })
// })
// var _ = g.Describe("[sig-baremetal] INSTALLER UPI for INSTALLER_DEDICATED job on BareMetal", func() {
// defer g.GinkgoRecover()
// var (
// oc = exutil.NewCLI("cluster-baremetal-operator", exutil.KubeConfigPath())
// )
// g.BeforeEach(func() {
// })
// g.AfterEach(func() {
// })
// // author: [email protected]
// g.It("Author:sgoveas--Medium-12345-example case", func() {
// })
// })
// var _ = g.Describe("[sig-baremetal] INSTALLER IPI for INSTALLER_GENERAL job on BareMetal", func() {
// defer g.GinkgoRecover()
// var (
// oc = exutil.NewCLI("cluster-baremetal-operator", exutil.KubeConfigPath())
// )
// g.BeforeEach(func() {
// })
// g.AfterEach(func() {
// })
// // author: [email protected]
// g.It("Author:sgoveas--Medium-12345-example case", func() {
// })
// })
var _ = g.Describe("[sig-baremetal] INSTALLER IPI for INSTALLER_GENERAL job on BareMetal", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("additional-ntp-servers", exutil.KubeConfigPath())
iaasPlatform string
)
g.BeforeEach(func() {
exutil.SkipForSNOCluster(oc)
iaasPlatform = exutil.CheckPlatform(oc)
if !(iaasPlatform == "baremetal") {
e2e.Logf("Cluster is: %s", iaasPlatform)
g.Skip("This feature is not supported for Non-baremetal cluster!")
}
})
// author: [email protected]
g.It("Author:sgoveas-NonPreRelease-Medium-79243-Check Additional NTP servers were added in install-config.yaml", func() {
exutil.By("1) Get the internal NTP server")
ntpHost := "aux-host-internal-name"
ntpFile := filepath.Join(os.Getenv(clusterProfileDir), ntpHost)
ntpServersList, err := ioutil.ReadFile(ntpFile)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("2) Check additionalNTPServer was added to install-config.yaml")
installConfig, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", "kube-system", "cluster-config-v1", "-o=jsonpath={.data.install-config}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
yqCmd := fmt.Sprintf(`echo "%s" | yq .platform.baremetal.additionalNTPServers`, installConfig)
ntpList, err := exec.Command("bash", "-c", yqCmd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if !strings.Contains(string(ntpList), string(ntpServersList)) {
e2e.Failf("Additional NTP server was not added to install-config.yaml", err)
}
})
})
| package baremetal | ||||
test case | openshift/openshift-tests-private | 27bd28c0-3505-474e-abc7-32f295f92e8f | Author:sgoveas-NonPreRelease-Medium-79243-Check Additional NTP servers were added in install-config.yaml | ['"fmt"', '"io/ioutil"', '"os"', '"os/exec"', '"path/filepath"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/additionalNTPServers.go | g.It("Author:sgoveas-NonPreRelease-Medium-79243-Check Additional NTP servers were added in install-config.yaml", func() {
exutil.By("1) Get the internal NTP server")
ntpHost := "aux-host-internal-name"
ntpFile := filepath.Join(os.Getenv(clusterProfileDir), ntpHost)
ntpServersList, err := ioutil.ReadFile(ntpFile)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("2) Check additionalNTPServer was added to install-config.yaml")
installConfig, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", "kube-system", "cluster-config-v1", "-o=jsonpath={.data.install-config}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
yqCmd := fmt.Sprintf(`echo "%s" | yq .platform.baremetal.additionalNTPServers`, installConfig)
ntpList, err := exec.Command("bash", "-c", yqCmd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if !strings.Contains(string(ntpList), string(ntpServersList)) {
e2e.Failf("Additional NTP server was not added to install-config.yaml", err)
}
}) | |||||
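The check above shells out to yq; an equivalent, hedged pure-Go variant (assuming gopkg.in/yaml.v3 is available to the module) unmarshals the install-config and reads platform.baremetal.additionalNTPServers directly, which avoids string matching on yq output.

// Hedged sketch: parses additionalNTPServers from the install-config without yq.
package baremetal

import (
	"gopkg.in/yaml.v3"
)

type installConfigNTP struct {
	Platform struct {
		Baremetal struct {
			AdditionalNTPServers []string `yaml:"additionalNTPServers"`
		} `yaml:"baremetal"`
	} `yaml:"platform"`
}

func additionalNTPServersSketch(installConfig string) ([]string, error) {
	var cfg installConfigNTP
	if err := yaml.Unmarshal([]byte(installConfig), &cfg); err != nil {
		return nil, err
	}
	return cfg.Platform.Baremetal.AdditionalNTPServers, nil
}

The returned slice could then be compared against the expected servers element by element instead of relying on a raw substring check.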
test | openshift/openshift-tests-private | 864cb27b-3a81-4c25-86e9-0ed4c1203ad7 | attach_non_bootable_iso | import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"k8s.io/apimachinery/pkg/util/wait"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/attach_non_bootable_iso.go | package baremetal
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"k8s.io/apimachinery/pkg/util/wait"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
// var _ = g.Describe("[sig-baremetal] INSTALLER UPI for INSTALLER_GENERAL job on BareMetal", func() {
// defer g.GinkgoRecover()
// var (
// oc = exutil.NewCLI("cluster-baremetal-operator", exutil.KubeConfigPath())
// )
// g.BeforeEach(func() {
// })
// g.AfterEach(func() {
// })
// // author: [email protected]
// g.It("Author:sgoveas--Medium-12345-example case", func() {
// })
// })
// var _ = g.Describe("[sig-baremetal] INSTALLER UPI for INSTALLER_DEDICATED job on BareMetal", func() {
// defer g.GinkgoRecover()
// var (
// oc = exutil.NewCLI("cluster-baremetal-operator", exutil.KubeConfigPath())
// )
// g.BeforeEach(func() {
// })
// g.AfterEach(func() {
// })
// // author: [email protected]
// g.It("Author:sgoveas--Medium-12345-example case", func() {
// })
// })
// var _ = g.Describe("[sig-baremetal] INSTALLER IPI for INSTALLER_GENERAL job on BareMetal", func() {
// defer g.GinkgoRecover()
// var (
// oc = exutil.NewCLI("cluster-baremetal-operator", exutil.KubeConfigPath())
// )
// g.BeforeEach(func() {
// })
// g.AfterEach(func() {
// })
// // author: [email protected]
// g.It("Author:sgoveas--Medium-12345-example case", func() {
// })
// })
var _ = g.Describe("[sig-baremetal] INSTALLER IPI for INSTALLER_DEDICATED job on BareMetal", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("cluster-baremetal-operator", exutil.KubeConfigPath())
iaasPlatform string
BaseDir string
isoUrl string
nbIsoUrl string
nginxIngress string
redfishUrl string
curlImg string
)
g.BeforeEach(func() {
exutil.SkipForSNOCluster(oc)
iaasPlatform = exutil.CheckPlatform(oc)
if !(iaasPlatform == "baremetal") {
e2e.Logf("Cluster is: %s", iaasPlatform)
g.Skip("This feature is not supported for Non-baremetal cluster!")
}
exutil.By("1) Check cluster is setup with redfish driver")
bmhName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("baremetalhosts", "-n", machineAPINamespace, "-o=jsonpath={.items[2].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
bmcAddressUrl, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("baremetalhosts", bmhName, "-n", machineAPINamespace, "-o=jsonpath={.spec.bmc.address}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if !strings.Contains(bmcAddressUrl, "redfish") {
g.Skip("Baremetal cluster node does not have redfish driver, skipping")
}
// Label worker node 2 to run the web-server hosting the iso
exutil.By("2) Add a label to second worker node ")
workerNode, err := exutil.GetClusterNodesBy(oc, "worker")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AddLabelToNode(oc, workerNode[1], "nginx-node", "true")
// nginx-iso.yaml contains the base64 content of a gzip iso
exutil.By("3) Create new project for nginx web-server.")
clusterDomain, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ingress.config/cluster", "-o=jsonpath={.spec.domain}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
isoUrl = "nb-iso." + clusterDomain
nbIsoUrl = "http://" + isoUrl + "/non-bootable.iso"
oc.SetupProject()
testNamespace := oc.Namespace()
exutil.By("4) Create web-server to host the iso file")
BaseDir = exutil.FixturePath("testdata", "installer")
nginxIso := filepath.Join(BaseDir, "baremetal", "nginx-iso.yaml")
dcErr := oc.Run("create").Args("-f", nginxIso, "-n", testNamespace).Execute()
o.Expect(dcErr).NotTo(o.HaveOccurred())
exutil.AssertPodToBeReady(oc, "nginx-pod", testNamespace)
exutil.By("5) Create ingress to access the iso file")
fileIngress := filepath.Join(BaseDir, "baremetal", "nginx-ingress.yaml")
nginxIngress = CopyToFile(fileIngress, "nginx-ingress.yaml")
defer os.Remove(nginxIngress)
exutil.ModifyYamlFileContent(nginxIngress, []exutil.YamlReplace{
{
Path: "spec.rules.0.host",
Value: isoUrl,
},
})
IngErr := oc.Run("create").Args("-f", nginxIngress, "-n", testNamespace).Execute()
o.Expect(IngErr).NotTo(o.HaveOccurred())
})
g.AfterEach(func() {
workerNode, _ := exutil.GetClusterNodesBy(oc, "worker")
exutil.DeleteLabelFromNode(oc, workerNode[1], "nginx-node")
})
// author: [email protected]
g.It("Author:sgoveas-Longduration-NonPreRelease-Medium-74737-Attach non-bootable iso to a master node [Disruptive]", func() {
exutil.By("6) Get baremetal host bmc credentials")
bmhName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("baremetalhosts", "-n", machineAPINamespace, "-o=jsonpath={.items[2].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
bmcAddressUrl, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("baremetalhosts", bmhName, "-n", machineAPINamespace, "-o=jsonpath={.spec.bmc.address}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
bmcCredFile, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("baremetalhosts", bmhName, "-n", machineAPINamespace, "-o=jsonpath={.spec.bmc.credentialsName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
bmcUser := getUserFromSecret(oc, machineAPINamespace, bmcCredFile)
bmcPass := getPassFromSecret(oc, machineAPINamespace, bmcCredFile)
exutil.By("7) Get redfish URL")
bmcAddress := strings.TrimPrefix(bmcAddressUrl, "redfish-virtualmedia://")
setIndex := strings.Index(bmcAddress, "/redfish")
if setIndex != -1 {
bmcAddress = bmcAddress[:setIndex]
}
bmcVendor, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("baremetalhosts", bmhName, "-n", machineAPINamespace, "-o=jsonpath={.status.hardware.systemVendor.manufacturer}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(bmcVendor, "Dell") {
redfishUrl = fmt.Sprintf("https://%s:%s@%s/redfish/v1/Systems/System.Embedded.1/VirtualMedia/1", bmcUser, bmcPass, bmcAddress)
curlImg = "null"
} else if strings.Contains(bmcVendor, "HPE") {
redfishUrl = fmt.Sprintf("https://%s:%s@%s/redfish/v1/Managers/1/VirtualMedia/2", bmcUser, bmcPass, bmcAddress)
curlImg = "\"\""
} else {
e2e.Failf("Unable to form the redfish URL", err)
}
exutil.By("8) Check no dataImage exists")
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("dataImage", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).NotTo(o.ContainSubstring(bmhName))
exutil.By("9) Check if an Image is already attached to the node master-02")
setProxyEnv()
defer unsetProxyEnv()
cmdCurl := fmt.Sprintf(`curl --silent --insecure --request GET --url %s | jq '.Image'`, redfishUrl)
img, err := exec.Command("bash", "-c", cmdCurl).Output()
if err != nil {
e2e.Failf("Error:", err, string(img))
}
if !strings.Contains(string(img), curlImg) {
e2e.Logf("An image is already attached (%s); the dataImage should override it", string(img))
} else {
e2e.Logf("No image is currently attached (%s)", string(img))
}
unsetProxyEnv()
exutil.By("10) Create dataImage 'master-02'")
masterNode, err := exutil.GetClusterNodesBy(oc, "master")
o.Expect(err).NotTo(o.HaveOccurred())
cd := "/tmp/cdrom"
dataPath := filepath.Join(BaseDir, "baremetal", "non-bootable-iso.yaml")
dataPathCopy := CopyToFile(dataPath, "non-bootable-iso-master.yaml")
e2e.Logf("ISO URL: %s", nbIsoUrl)
exutil.ModifyYamlFileContent(dataPathCopy, []exutil.YamlReplace{
{
Path: "metadata.name",
Value: bmhName,
},
{
Path: "spec.url",
Value: nbIsoUrl,
},
})
defer func() {
exutil.By("15) Cleanup changes")
exutil.ModifyYamlFileContent(dataPathCopy, []exutil.YamlReplace{
{
Path: "spec",
Value: "url: \"\"",
},
})
_, err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", dataPathCopy, "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("annotate").Args("baremetalhosts", bmhName, "reboot.metal3.io=", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
// poll for node status to change to NotReady
checkNodeStatus(oc, 5*time.Second, 80*time.Second, masterNode[2], "Unknown")
// poll for node status to change to Ready
checkNodeStatus(oc, 15*time.Second, 20*time.Minute, masterNode[2], "True")
_, err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("dataImage/"+bmhName, "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
cmdRm := `rm -fr %s %s`
cmdRm = fmt.Sprintf(cmdRm, cd, dataPathCopy)
_, err = exutil.DebugNodeWithChroot(oc, masterNode[2], "bash", "-c", cmdRm)
o.Expect(err).NotTo(o.HaveOccurred())
}()
_, err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", dataPathCopy, "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
out, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("dataImage", "-n", machineAPINamespace, "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).To(o.ContainSubstring(bmhName))
exutil.By("11) Reboot baremtalhost 'master-02'")
out, err = oc.AsAdmin().WithoutNamespace().Run("annotate").Args("baremetalhosts", bmhName, "reboot.metal3.io=", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).To(o.ContainSubstring("annotated"))
exutil.By("12) Waiting for the node to return to 'Ready' state")
// poll for node status to change to NotReady
checkNodeStatus(oc, 5*time.Second, 80*time.Second, masterNode[2], "Unknown")
// poll for node status to change to Ready
checkNodeStatus(oc, 15*time.Second, 20*time.Minute, masterNode[2], "True")
exutil.By("13) Check ISO image is attached to the node")
setProxyEnv()
defer unsetProxyEnv()
err = wait.Poll(15*time.Second, 60*time.Minute, func() (bool, error) {
img, err := exec.Command("bash", "-c", cmdCurl).Output()
if err != nil || !strings.Contains(string(img), ".iso") {
e2e.Logf("dataImage was not attached, Checking again", err)
return false, nil
}
if strings.Contains(string(img), ".iso") {
e2e.Logf("DataImage was attached")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "DataImage was not attached to the node as expected")
unsetProxyEnv()
exutil.By("14) Mount the iso image on the node to check contents")
cmdReadme := fmt.Sprintf(`mkdir %s;
mount -o loop /dev/sr0 %s;
cat %s/readme`, cd, cd, cd)
readMe, err := exutil.DebugNodeWithChroot(oc, masterNode[2], "bash", "-c", cmdReadme)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(readMe).To(o.ContainSubstring("Non bootable ISO"))
})
// author: [email protected]
g.It("Author:sgoveas-Longduration-NonPreRelease-Medium-74736-Attach non-bootable iso to a worker node [Disruptive]", func() {
exutil.By("6) Get baremetal host bmc credentials")
bmhName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("baremetalhosts", "-n", machineAPINamespace, "-o=jsonpath={.items[3].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
bmcAddressUrl, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("baremetalhosts", bmhName, "-n", machineAPINamespace, "-o=jsonpath={.spec.bmc.address}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
bmcCredFile, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("baremetalhosts", bmhName, "-n", machineAPINamespace, "-o=jsonpath={.spec.bmc.credentialsName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
bmcUser := getUserFromSecret(oc, machineAPINamespace, bmcCredFile)
bmcPass := getPassFromSecret(oc, machineAPINamespace, bmcCredFile)
exutil.By("7) Get redfish URL")
bmcAddress := strings.TrimPrefix(bmcAddressUrl, "redfish-virtualmedia://")
setIndex := strings.Index(bmcAddress, "/redfish")
if setIndex != -1 {
bmcAddress = bmcAddress[:setIndex]
}
bmcVendor, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("baremetalhosts", bmhName, "-n", machineAPINamespace, "-o=jsonpath={.status.hardware.systemVendor.manufacturer}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(bmcVendor, "Dell") {
redfishUrl = fmt.Sprintf("https://%s:%s@%s/redfish/v1/Systems/System.Embedded.1/VirtualMedia/1", bmcUser, bmcPass, bmcAddress)
curlImg = "null"
} else if strings.Contains(bmcVendor, "HPE") {
redfishUrl = fmt.Sprintf("https://%s:%s@%s/redfish/v1/Managers/1/VirtualMedia/2", bmcUser, bmcPass, bmcAddress)
curlImg = "\"\""
} else {
e2e.Failf("Unable to form the redfish URL", err)
}
exutil.By("8) Check no dataImage exists")
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("dataImage", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).NotTo(o.ContainSubstring(bmhName))
exutil.By("9) Check if an Image is already attached to the node worker-00")
setProxyEnv()
defer unsetProxyEnv()
cmdCurl := fmt.Sprintf(`curl --silent --insecure --request GET --url %s | jq '.Image'`, redfishUrl)
img, err := exec.Command("bash", "-c", cmdCurl).Output()
if err != nil {
e2e.Failf("Error:", err, string(img))
}
if !strings.Contains(string(img), curlImg) {
e2e.Logf("An image is already attached (%s); the dataImage should override it", string(img))
} else {
e2e.Logf("No image is currently attached (%s)", string(img))
}
unsetProxyEnv()
exutil.By("10) Create dataImage 'worker-00'")
workerNode, err := exutil.GetClusterNodesBy(oc, "worker")
o.Expect(err).NotTo(o.HaveOccurred())
cd := "/tmp/cdrom"
dataPath := filepath.Join(BaseDir, "baremetal", "non-bootable-iso.yaml")
dataPathCopy := CopyToFile(dataPath, "non-bootable-iso-worker.yaml")
e2e.Logf("ISO URL: %s", nbIsoUrl)
exutil.ModifyYamlFileContent(dataPathCopy, []exutil.YamlReplace{
{
Path: "metadata.name",
Value: bmhName,
},
{
Path: "spec.url",
Value: nbIsoUrl,
},
})
defer func() {
exutil.By("15) Cleanup changes")
exutil.ModifyYamlFileContent(dataPathCopy, []exutil.YamlReplace{
{
Path: "spec",
Value: "url: \"\"",
},
})
_, err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", dataPathCopy, "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("annotate").Args("baremetalhosts", bmhName, "reboot.metal3.io=", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
// poll for node status to change to NotReady
checkNodeStatus(oc, 5*time.Second, 80*time.Second, workerNode[0], "Unknown")
// poll for node status to change to Ready
checkNodeStatus(oc, 15*time.Second, 20*time.Minute, workerNode[0], "True")
_, err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("dataImage/"+bmhName, "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
cmdRm := `rm -fr %s %s`
cmdRm = fmt.Sprintf(cmdRm, cd, dataPathCopy)
_, err = exutil.DebugNodeWithChroot(oc, workerNode[0], "bash", "-c", cmdRm)
o.Expect(err).NotTo(o.HaveOccurred())
}()
_, err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", dataPathCopy, "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
out, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("dataImage", "-n", machineAPINamespace, "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).To(o.ContainSubstring(bmhName))
exutil.By("11) Reboot baremtalhost 'worker-00'")
out, err = oc.AsAdmin().WithoutNamespace().Run("annotate").Args("baremetalhosts", bmhName, "reboot.metal3.io=", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).To(o.ContainSubstring("annotated"))
exutil.By("12) Waiting for the node to return to 'Ready' state")
// poll for node status to change to NotReady
checkNodeStatus(oc, 5*time.Second, 80*time.Second, workerNode[0], "Unknown")
// poll for node status to change to Ready
checkNodeStatus(oc, 15*time.Second, 20*time.Minute, workerNode[0], "True")
exutil.By("13) Check ISO image is attached to the node")
setProxyEnv()
defer unsetProxyEnv()
err = wait.Poll(5*time.Second, 60*time.Minute, func() (bool, error) {
img, err = exec.Command("bash", "-c", cmdCurl).Output()
if err != nil || !strings.Contains(string(img), ".iso") {
e2e.Logf("dataImage was not attached, Checking again", err)
return false, nil
}
if strings.Contains(string(img), ".iso") {
e2e.Logf("DataImage was attached")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "DataImage was not attached to the node as expected")
unsetProxyEnv()
exutil.By("14) Mount the iso image on the node to check contents")
cmdReadme := fmt.Sprintf(`mkdir %s;
mount -o loop /dev/sr0 %s;
cat %s/readme`, cd, cd, cd)
readMe, err := exutil.DebugNodeWithChroot(oc, workerNode[0], "bash", "-c", cmdReadme)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(readMe).To(o.ContainSubstring("Non bootable ISO"))
})
})
| package baremetal | ||||
test case | openshift/openshift-tests-private | fa8e8039-5025-43ee-93f7-eaa7cff61aa7 | Author:sgoveas-Longduration-NonPreRelease-Medium-74737-Attach non-bootable iso to a master node [Disruptive] | ['"fmt"', '"os/exec"', '"path/filepath"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/attach_non_bootable_iso.go | g.It("Author:sgoveas-Longduration-NonPreRelease-Medium-74737-Attach non-bootable iso to a master node [Disruptive]", func() {
exutil.By("6) Get baremetal host bmc credentials")
bmhName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("baremetalhosts", "-n", machineAPINamespace, "-o=jsonpath={.items[2].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
bmcAddressUrl, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("baremetalhosts", bmhName, "-n", machineAPINamespace, "-o=jsonpath={.spec.bmc.address}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
bmcCredFile, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("baremetalhosts", bmhName, "-n", machineAPINamespace, "-o=jsonpath={.spec.bmc.credentialsName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
bmcUser := getUserFromSecret(oc, machineAPINamespace, bmcCredFile)
bmcPass := getPassFromSecret(oc, machineAPINamespace, bmcCredFile)
exutil.By("7) Get redfish URL")
bmcAddress := strings.TrimPrefix(bmcAddressUrl, "redfish-virtualmedia://")
setIndex := strings.Index(bmcAddress, "/redfish")
if setIndex != -1 {
bmcAddress = bmcAddress[:setIndex]
}
bmcVendor, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("baremetalhosts", bmhName, "-n", machineAPINamespace, "-o=jsonpath={.status.hardware.systemVendor.manufacturer}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(bmcVendor, "Dell") {
redfishUrl = fmt.Sprintf("https://%s:%s@%s/redfish/v1/Systems/System.Embedded.1/VirtualMedia/1", bmcUser, bmcPass, bmcAddress)
curlImg = "null"
} else if strings.Contains(bmcVendor, "HPE") {
redfishUrl = fmt.Sprintf("https://%s:%s@%s/redfish/v1/Managers/1/VirtualMedia/2", bmcUser, bmcPass, bmcAddress)
curlImg = "\"\""
} else {
e2e.Failf("Unable to form the redfish URL", err)
}
exutil.By("8) Check no dataImage exists")
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("dataImage", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).NotTo(o.ContainSubstring(bmhName))
exutil.By("9) Check if an Image is already attached to the node master-02")
setProxyEnv()
defer unsetProxyEnv()
cmdCurl := fmt.Sprintf(`curl --silent --insecure --request GET --url %s | jq '.Image'`, redfishUrl)
img, err := exec.Command("bash", "-c", cmdCurl).Output()
if err != nil {
e2e.Failf("Error:", err, string(img))
}
if !strings.Contains(string(img), curlImg) {
e2e.Logf("An image is already attached (%s); the dataImage should override it", string(img))
} else {
e2e.Logf("No image is currently attached (%s)", string(img))
}
unsetProxyEnv()
exutil.By("10) Create dataImage 'master-02'")
masterNode, err := exutil.GetClusterNodesBy(oc, "master")
o.Expect(err).NotTo(o.HaveOccurred())
cd := "/tmp/cdrom"
dataPath := filepath.Join(BaseDir, "baremetal", "non-bootable-iso.yaml")
dataPathCopy := CopyToFile(dataPath, "non-bootable-iso-master.yaml")
e2e.Logf("ISO URL: %s", nbIsoUrl)
exutil.ModifyYamlFileContent(dataPathCopy, []exutil.YamlReplace{
{
Path: "metadata.name",
Value: bmhName,
},
{
Path: "spec.url",
Value: nbIsoUrl,
},
})
defer func() {
exutil.By("15) Cleanup changes")
exutil.ModifyYamlFileContent(dataPathCopy, []exutil.YamlReplace{
{
Path: "spec",
Value: "url: \"\"",
},
})
_, err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", dataPathCopy, "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("annotate").Args("baremetalhosts", bmhName, "reboot.metal3.io=", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
// poll for node status to change to NotReady
checkNodeStatus(oc, 5*time.Second, 80*time.Second, masterNode[2], "Unknown")
// poll for node status to change to Ready
checkNodeStatus(oc, 15*time.Second, 20*time.Minute, masterNode[2], "True")
_, err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("dataImage/"+bmhName, "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
cmdRm := `rm -fr %s %s`
cmdRm = fmt.Sprintf(cmdRm, cd, dataPathCopy)
_, err = exutil.DebugNodeWithChroot(oc, masterNode[2], "bash", "-c", cmdRm)
o.Expect(err).NotTo(o.HaveOccurred())
}()
_, err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", dataPathCopy, "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
out, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("dataImage", "-n", machineAPINamespace, "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).To(o.ContainSubstring(bmhName))
exutil.By("11) Reboot baremtalhost 'master-02'")
out, err = oc.AsAdmin().WithoutNamespace().Run("annotate").Args("baremetalhosts", bmhName, "reboot.metal3.io=", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).To(o.ContainSubstring("annotated"))
exutil.By("12) Waiting for the node to return to 'Ready' state")
// poll for node status to change to NotReady
checkNodeStatus(oc, 5*time.Second, 80*time.Second, masterNode[2], "Unknown")
// poll for node status to change to Ready
checkNodeStatus(oc, 15*time.Second, 20*time.Minute, masterNode[2], "True")
exutil.By("13) Check ISO image is attached to the node")
setProxyEnv()
defer unsetProxyEnv()
err = wait.Poll(15*time.Second, 60*time.Minute, func() (bool, error) {
img, err := exec.Command("bash", "-c", cmdCurl).Output()
if err != nil || !strings.Contains(string(img), ".iso") {
e2e.Logf("dataImage was not attached, Checking again", err)
return false, nil
}
if strings.Contains(string(img), ".iso") {
e2e.Logf("DataImage was attached")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "DataImage was not attached to the node as expected")
unsetProxyEnv()
exutil.By("14) Mount the iso image on the node to check contents")
cmdReadme := fmt.Sprintf(`mkdir %s;
mount -o loop /dev/sr0 %s;
cat %s/readme`, cd, cd, cd)
readMe, err := exutil.DebugNodeWithChroot(oc, masterNode[2], "bash", "-c", cmdReadme)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(readMe).To(o.ContainSubstring("Non bootable ISO"))
}) | |||||
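checkNodeStatus is a package helper that polls the node's Ready condition (interval, timeout, node name, expected condition status, where "Unknown" corresponds to NotReady right after the reboot); a minimal hypothetical sketch consistent with how it is called above:

// Hypothetical sketch of the node-status polling helper used above.
package baremetal

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"

	exutil "github.com/openshift/openshift-tests-private/test/extended/util"
	e2e "k8s.io/kubernetes/test/e2e/framework"
)

func checkNodeStatusSketch(oc *exutil.CLI, interval, timeout time.Duration, node, expected string) {
	err := wait.Poll(interval, timeout, func() (bool, error) {
		status, getErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", node,
			`-o=jsonpath={.status.conditions[?(@.type=="Ready")].status}`).Output()
		if getErr != nil {
			e2e.Logf("could not read node %s Ready condition: %v, retrying", node, getErr)
			return false, nil
		}
		return status == expected, nil
	})
	exutil.AssertWaitPollNoErr(err, fmt.Sprintf("node %s never reported Ready=%s", node, expected))
}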
test case | openshift/openshift-tests-private | 42238776-f9bd-4308-936b-570b0d95c9cc | Author:sgoveas-Longduration-NonPreRelease-Medium-74736-Attach non-bootable iso to a worker node [Disruptive] | ['"fmt"', '"os/exec"', '"path/filepath"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/attach_non_bootable_iso.go | g.It("Author:sgoveas-Longduration-NonPreRelease-Medium-74736-Attach non-bootable iso to a worker node [Disruptive]", func() {
exutil.By("6) Get baremetal host bmc credentials")
bmhName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("baremetalhosts", "-n", machineAPINamespace, "-o=jsonpath={.items[3].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
bmcAddressUrl, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("baremetalhosts", bmhName, "-n", machineAPINamespace, "-o=jsonpath={.spec.bmc.address}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
bmcCredFile, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("baremetalhosts", bmhName, "-n", machineAPINamespace, "-o=jsonpath={.spec.bmc.credentialsName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
bmcUser := getUserFromSecret(oc, machineAPINamespace, bmcCredFile)
bmcPass := getPassFromSecret(oc, machineAPINamespace, bmcCredFile)
exutil.By("7) Get redfish URL")
bmcAddress := strings.TrimPrefix(bmcAddressUrl, "redfish-virtualmedia://")
setIndex := strings.Index(bmcAddress, "/redfish")
if setIndex != -1 {
bmcAddress = bmcAddress[:setIndex]
}
bmcVendor, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("baremetalhosts", bmhName, "-n", machineAPINamespace, "-o=jsonpath={.status.hardware.systemVendor.manufacturer}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(bmcVendor, "Dell") {
redfishUrl = fmt.Sprintf("https://%s:%s@%s/redfish/v1/Systems/System.Embedded.1/VirtualMedia/1", bmcUser, bmcPass, bmcAddress)
curlImg = "null"
} else if strings.Contains(bmcVendor, "HPE") {
redfishUrl = fmt.Sprintf("https://%s:%s@%s/redfish/v1/Managers/1/VirtualMedia/2", bmcUser, bmcPass, bmcAddress)
curlImg = "\"\""
} else {
e2e.Failf("Unable to form the redfish URL", err)
}
exutil.By("8) Check no dataImage exists")
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("dataImage", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).NotTo(o.ContainSubstring(bmhName))
exutil.By("9) Check if an Image is already attached to the node worker-00")
setProxyEnv()
defer unsetProxyEnv()
cmdCurl := fmt.Sprintf(`curl --silent --insecure --request GET --url %s | jq '.Image'`, redfishUrl)
img, err := exec.Command("bash", "-c", cmdCurl).Output()
if err != nil {
e2e.Failf("Error:", err, string(img))
}
if !strings.Contains(string(img), curlImg) {
e2e.Logf("An image is already attached (%s); the dataImage should override it", string(img))
} else {
e2e.Logf("No image is currently attached (%s)", string(img))
}
unsetProxyEnv()
exutil.By("10) Create dataImage 'worker-00'")
workerNode, err := exutil.GetClusterNodesBy(oc, "worker")
o.Expect(err).NotTo(o.HaveOccurred())
cd := "/tmp/cdrom"
dataPath := filepath.Join(BaseDir, "baremetal", "non-bootable-iso.yaml")
dataPathCopy := CopyToFile(dataPath, "non-bootable-iso-worker.yaml")
e2e.Logf("ISO URL: %s", nbIsoUrl)
exutil.ModifyYamlFileContent(dataPathCopy, []exutil.YamlReplace{
{
Path: "metadata.name",
Value: bmhName,
},
{
Path: "spec.url",
Value: nbIsoUrl,
},
})
defer func() {
exutil.By("15) Cleanup changes")
exutil.ModifyYamlFileContent(dataPathCopy, []exutil.YamlReplace{
{
Path: "spec",
Value: "url: \"\"",
},
})
_, err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", dataPathCopy, "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("annotate").Args("baremetalhosts", bmhName, "reboot.metal3.io=", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
// poll for node status to change to NotReady
checkNodeStatus(oc, 5*time.Second, 80*time.Second, workerNode[0], "Unknown")
// poll for node status to change to Ready
checkNodeStatus(oc, 15*time.Second, 20*time.Minute, workerNode[0], "True")
_, err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("dataImage/"+bmhName, "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
cmdRm := `rm -fr %s %s`
cmdRm = fmt.Sprintf(cmdRm, cd, dataPathCopy)
_, err = exutil.DebugNodeWithChroot(oc, workerNode[0], "bash", "-c", cmdRm)
o.Expect(err).NotTo(o.HaveOccurred())
}()
_, err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", dataPathCopy, "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
out, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("dataImage", "-n", machineAPINamespace, "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).To(o.ContainSubstring(bmhName))
exutil.By("11) Reboot baremtalhost 'worker-00'")
out, err = oc.AsAdmin().WithoutNamespace().Run("annotate").Args("baremetalhosts", bmhName, "reboot.metal3.io=", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).To(o.ContainSubstring("annotated"))
exutil.By("12) Waiting for the node to return to 'Ready' state")
// poll for node status to change to NotReady
checkNodeStatus(oc, 5*time.Second, 80*time.Second, workerNode[0], "Unknown")
// poll for node status to change to Ready
checkNodeStatus(oc, 15*time.Second, 20*time.Minute, workerNode[0], "True")
exutil.By("13) Check ISO image is attached to the node")
setProxyEnv()
defer unsetProxyEnv()
err = wait.Poll(5*time.Second, 60*time.Minute, func() (bool, error) {
img, err = exec.Command("bash", "-c", cmdCurl).Output()
if err != nil || !strings.Contains(string(img), ".iso") {
e2e.Logf("dataImage was not attached, Checking again", err)
return false, nil
}
if strings.Contains(string(img), ".iso") {
e2e.Logf("DataImage was attached")
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "DataImage was not attached to the node as expected")
unsetProxyEnv()
exutil.By("14) Mount the iso image on the node to check contents")
cmdReadme := fmt.Sprintf(`mkdir %s;
mount -o loop /dev/sr0 %s;
cat %s/readme`, cd, cd, cd)
readMe, err := exutil.DebugNodeWithChroot(oc, workerNode[0], "bash", "-c", cmdReadme)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(readMe).To(o.ContainSubstring("Non bootable ISO"))
}) | |||||
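Both test cases build the vendor-specific redfish VirtualMedia URL inline; the same logic pulled into a small helper, purely for illustration (the Dell and HPE endpoints are exactly the ones used above, any other vendor is rejected):

// Illustrative refactor of the inline Dell/HPE branch used by the two cases above.
package baremetal

import (
	"fmt"
	"strings"
)

func redfishVirtualMediaURLSketch(vendor, bmcUser, bmcPass, bmcAddress string) (url, emptyImage string, err error) {
	switch {
	case strings.Contains(vendor, "Dell"):
		return fmt.Sprintf("https://%s:%s@%s/redfish/v1/Systems/System.Embedded.1/VirtualMedia/1",
			bmcUser, bmcPass, bmcAddress), "null", nil
	case strings.Contains(vendor, "HPE"):
		return fmt.Sprintf("https://%s:%s@%s/redfish/v1/Managers/1/VirtualMedia/2",
			bmcUser, bmcPass, bmcAddress), `""`, nil
	default:
		return "", "", fmt.Errorf("unsupported BMC vendor %q", vendor)
	}
}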
test | openshift/openshift-tests-private | 11e6e24e-b10c-4c4b-8580-3cfd5efe7806 | host_fw_components | import (
"fmt"
"os"
"path/filepath"
"strconv"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/host_fw_components.go | package baremetal
import (
"fmt"
"os"
"path/filepath"
"strconv"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-baremetal] INSTALLER IPI for INSTALLER_DEDICATED job on BareMetal", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("host-firmware-components", exutil.KubeConfigPath())
iaasPlatform string
dirname string
)
g.BeforeEach(func() {
exutil.SkipForSNOCluster(oc)
iaasPlatform = exutil.CheckPlatform(oc)
if !(iaasPlatform == "baremetal") {
e2e.Logf("Cluster is: %s", iaasPlatform)
g.Skip("For Non-baremetal cluster , this is not supported!")
}
})
// author: [email protected]
g.It("Author:jhajyahy-Longduration-NonPreRelease-Medium-75430-Update host FW via HostFirmwareComponents CRD [Disruptive]", func() {
dirname = "OCP-75430.log"
host, getBmhErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("bmh", "-n", machineAPINamespace, "-o=jsonpath={.items[4].metadata.name}").Output()
o.Expect(getBmhErr).NotTo(o.HaveOccurred(), "Failed to get bmh name")
vendor, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("bmh", "-n", machineAPINamespace, host, "-o=jsonpath={.status.hardware.firmware.bios.vendor}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
initialVersion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("HostFirmwareComponents", "-n", machineAPINamespace, host, "-o=jsonpath={.status.components[1].currentVersion}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
testNamespace := oc.Namespace()
downloadUrl, fileName := buildFirmwareURL(vendor, initialVersion)
// Label the first worker node to run the web server hosting the firmware file
exutil.By("Add a label to first worker node ")
workerNode, err := exutil.GetClusterNodesBy(oc, "worker")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AddLabelToNode(oc, workerNode[0], "nginx-node", "true")
exutil.By("Create web-server to host the fw file")
BaseDir := exutil.FixturePath("testdata", "installer")
fwConfigmap := filepath.Join(BaseDir, "baremetal", "firmware-cm.yaml")
nginxFW := filepath.Join(BaseDir, "baremetal", "nginx-firmware.yaml")
exutil.ModifyYamlFileContent(fwConfigmap, []exutil.YamlReplace{
{
Path: "data.firmware_url",
Value: downloadUrl,
},
})
dcErr := oc.Run("create").Args("-f", fwConfigmap, "-n", testNamespace).Execute()
o.Expect(dcErr).NotTo(o.HaveOccurred())
dcErr = oc.Run("create").Args("-f", nginxFW, "-n", testNamespace).Execute()
o.Expect(dcErr).NotTo(o.HaveOccurred())
exutil.AssertPodToBeReady(oc, "nginx-pod", testNamespace)
exutil.By("Create ingress to access the iso file")
fileIngress := filepath.Join(BaseDir, "baremetal", "nginx-ingress.yaml")
nginxIngress := CopyToFile(fileIngress, "nginx-ingress.yaml")
clusterDomain, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ingress.config/cluster", "-o=jsonpath={.spec.domain}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
fwUrl := "fw." + clusterDomain
defer os.Remove(nginxIngress)
exutil.ModifyYamlFileContent(nginxIngress, []exutil.YamlReplace{
{
Path: "spec.rules.0.host",
Value: fwUrl,
},
})
IngErr := oc.Run("create").Args("-f", nginxIngress, "-n", testNamespace).Execute()
o.Expect(IngErr).NotTo(o.HaveOccurred())
exutil.By("Update HFC CRD")
component := "bmc"
hfcUrl := "http://" + fwUrl + "/" + fileName
patchConfig := fmt.Sprintf(`[{"op": "replace", "path": "/spec/updates", "value": [{"component":"%s","url":"%s"}]}]`, component, hfcUrl)
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("HostFirmwareComponents", "-n", machineAPINamespace, host, "--type=json", "-p", patchConfig).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
bmcUrl, _ := oc.AsAdmin().Run("get").Args("-n", machineAPINamespace, "hostfirmwarecomponents", host, "-o=jsonpath={.spec.updates[0].url}").Output()
o.Expect(bmcUrl).Should(o.Equal(hfcUrl))
defer func() {
patchConfig := `[{"op": "replace", "path": "/spec/updates", "value": []}]`
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("HostFirmwareComponents", "-n", machineAPINamespace, host, "--type=json", "-p", patchConfig).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.DeleteLabelFromNode(oc, workerNode[0], "nginx-node")
nodeHealthErr := clusterNodesHealthcheck(oc, 1500)
exutil.AssertWaitPollNoErr(nodeHealthErr, "Cluster did not recover in time!")
clusterOperatorHealthcheckErr := clusterOperatorHealthcheck(oc, 1500, dirname)
exutil.AssertWaitPollNoErr(clusterOperatorHealthcheckErr, "Cluster operators did not recover in time!")
}()
exutil.By("Get machine name of host")
machine, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("bmh", "-n", machineAPINamespace, host, "-o=jsonpath={.spec.consumerRef.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// Get the origin number of replicas
machineSet, cmdErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("machineset", "-n", machineAPINamespace, "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(cmdErr).NotTo(o.HaveOccurred())
originReplicasStr, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("machineset", machineSet, "-n", machineAPINamespace, "-o=jsonpath={.spec.replicas}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
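// Firmware updates requested through HostFirmwareComponents are applied as the host goes through
// deprovisioning and re-provisioning, so the test releases exactly this machine (delete-machine
// annotation plus a MachineSet scale down) and scales back up afterwards to trigger the update.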
exutil.By("Annotate worker-01 machine for deletion")
_, err = oc.AsAdmin().WithoutNamespace().Run("annotate").Args("machine", machine, "machine.openshift.io/cluster-api-delete-machine=yes", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Scale down machineset")
originReplicas, err := strconv.Atoi(originReplicasStr)
o.Expect(err).NotTo(o.HaveOccurred())
newReplicas := originReplicas - 1
_, err = oc.AsAdmin().WithoutNamespace().Run("scale").Args("machineset", machineSet, "-n", machineAPINamespace, fmt.Sprintf("--replicas=%d", newReplicas)).Output()
o.Expect(err).NotTo(o.HaveOccurred())
waitForBMHState(oc, host, "available")
defer func() {
currentReplicasStr, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("machineset", machineSet, "-n", machineAPINamespace, "-o=jsonpath={.spec.replicas}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if currentReplicasStr != originReplicasStr {
_, err := oc.AsAdmin().WithoutNamespace().Run("scale").Args("machineset", machineSet, "-n", machineAPINamespace, fmt.Sprintf("--replicas=%s", originReplicasStr)).Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeHealthErr := clusterNodesHealthcheck(oc, 1500)
exutil.AssertWaitPollNoErr(nodeHealthErr, "Cluster did not recover in time!")
clusterOperatorHealthcheckErr := clusterOperatorHealthcheck(oc, 1500, dirname)
exutil.AssertWaitPollNoErr(clusterOperatorHealthcheckErr, "Cluster operators did not recover in time!")
}
}()
exutil.By("Scale up machineset")
_, err = oc.AsAdmin().WithoutNamespace().Run("scale").Args("machineset", machineSet, "-n", machineAPINamespace, fmt.Sprintf("--replicas=%s", originReplicasStr)).Output()
o.Expect(err).NotTo(o.HaveOccurred())
waitForBMHState(oc, host, "provisioned")
nodeHealthErr := clusterNodesHealthcheck(oc, 1500)
exutil.AssertWaitPollNoErr(nodeHealthErr, "Cluster did not recover in time!")
clusterOperatorHealthcheckErr := clusterOperatorHealthcheck(oc, 1500, dirname)
exutil.AssertWaitPollNoErr(clusterOperatorHealthcheckErr, "Cluster operators did not recover in time!")
currentVersion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("HostFirmwareComponents", "-n", machineAPINamespace, host, "-o=jsonpath={.status.components[1].currentVersion}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(currentVersion).ShouldNot(o.Equal(initialVersion))
})
// author: [email protected]
g.It("Author:jhajyahy-Longduration-NonPreRelease-Medium-77676-DAY2 Update HFS via HostUpdatePolicy CRD [Disruptive]", func() {
dirname = "OCP-77676.log"
host, getBmhErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("bmh", "-n", machineAPINamespace, "-o=jsonpath={.items[4].metadata.name}").Output()
o.Expect(getBmhErr).NotTo(o.HaveOccurred(), "Failed to get bmh name")
workerNode, err := exutil.GetClusterNodesBy(oc, "worker")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create host update policy")
BaseDir := exutil.FixturePath("testdata", "installer")
hostUpdatePolicy := filepath.Join(BaseDir, "baremetal", "host-update-policy.yaml")
exutil.ModifyYamlFileContent(hostUpdatePolicy, []exutil.YamlReplace{
{
Path: "metadata.name",
Value: host,
},
})
dcErr := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", hostUpdatePolicy, "-n", machineAPINamespace).Execute()
o.Expect(dcErr).NotTo(o.HaveOccurred())
defer func() {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("HostUpdatePolicy", "-n", machineAPINamespace, host).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
nodeHealthErr := clusterNodesHealthcheck(oc, 3000)
exutil.AssertWaitPollNoErr(nodeHealthErr, "Cluster did not recover in time!")
clusterOperatorHealthcheckErr := clusterOperatorHealthcheck(oc, 1500, dirname)
exutil.AssertWaitPollNoErr(clusterOperatorHealthcheckErr, "Cluster operators did not recover in time!")
}()
exutil.By("Update HFS setting based on vendor")
vendor, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("bmh", "-n", machineAPINamespace, host, "-o=jsonpath={.status.hardware.firmware.bios.vendor}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
hfs, value, err := getHfsByVendor(oc, vendor, machineAPINamespace, host)
o.Expect(err).NotTo(o.HaveOccurred())
patchConfig := fmt.Sprintf(`[{"op": "replace", "path": "/spec/settings/%s", "value": "%s"}]`, hfs, value)
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("hfs", "-n", machineAPINamespace, host, "--type=json", "-p", patchConfig).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
defer func() {
patchConfig := `[{"op": "replace", "path": "/spec/settings", "value": {}}]`
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("hfs", "-n", machineAPINamespace, host, "--type=json", "-p", patchConfig).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
}()
specModified, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("hfs", "-n", machineAPINamespace, host, fmt.Sprintf("-o=jsonpath={.spec.settings.%s}", hfs)).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(specModified).Should(o.Equal(value))
exutil.By("Reboot baremtalhost worker-01")
out, err := oc.AsAdmin().WithoutNamespace().Run("annotate").Args("baremetalhosts", host, "reboot.metal3.io=", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).To(o.ContainSubstring("annotated"))
exutil.By("Waiting for the node to return to 'Ready' state")
// poll for node status to change to NotReady
err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", workerNode[1], "-o=jsonpath={.status.conditions[3].status}").Output()
if err != nil || string(output) == "True" {
e2e.Logf("Node is available, status: %s. Trying again", output)
return false, nil
}
if string(output) == "Unknown" {
e2e.Logf("Node is Ready, status: %s", output)
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "Node did not change state as expected")
nodeHealthErr := clusterNodesHealthcheck(oc, 3000)
exutil.AssertWaitPollNoErr(nodeHealthErr, "Cluster did not recover in time!")
clusterOperatorHealthcheckErr := clusterOperatorHealthcheck(oc, 1500, dirname)
exutil.AssertWaitPollNoErr(clusterOperatorHealthcheckErr, "Cluster operators did not recover in time!")
exutil.By("Verify hfs setting was actually changed")
statusModified, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("hfs", "-n", machineAPINamespace, host, fmt.Sprintf("-o=jsonpath={.status.settings.%s}", hfs)).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(statusModified).Should(o.Equal(specModified))
})
// author: [email protected]
g.It("Author:jhajyahy-Longduration-NonPreRelease-Medium-78361-DAY2 Update host FW via HostFirmwareComponents CRD [Disruptive]", func() {
dirname = "OCP-78361.log"
host, getBmhErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("bmh", "-n", machineAPINamespace, "-o=jsonpath={.items[4].metadata.name}").Output()
o.Expect(getBmhErr).NotTo(o.HaveOccurred(), "Failed to get bmh name")
vendor, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("bmh", "-n", machineAPINamespace, host, "-o=jsonpath={.status.hardware.firmware.bios.vendor}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
initialVersion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("HostFirmwareComponents", "-n", machineAPINamespace, host, "-o=jsonpath={.status.components[1].currentVersion}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create host update policy")
BaseDir := exutil.FixturePath("testdata", "installer")
hostUpdatePolicy := filepath.Join(BaseDir, "baremetal", "host-update-policy.yaml")
exutil.ModifyYamlFileContent(hostUpdatePolicy, []exutil.YamlReplace{
{
Path: "metadata.name",
Value: host,
},
})
dcErr := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", hostUpdatePolicy, "-n", machineAPINamespace).Execute()
o.Expect(dcErr).NotTo(o.HaveOccurred())
defer func() {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("HostUpdatePolicy", "-n", machineAPINamespace, host).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
nodeHealthErr := clusterNodesHealthcheck(oc, 3000)
exutil.AssertWaitPollNoErr(nodeHealthErr, "Cluster did not recover in time!")
clusterOperatorHealthcheckErr := clusterOperatorHealthcheck(oc, 1500, dirname)
exutil.AssertWaitPollNoErr(clusterOperatorHealthcheckErr, "Cluster operators did not recover in time!")
}()
oc.SetupProject()
testNamespace := oc.Namespace()
downloadUrl, fileName := buildFirmwareURL(vendor, initialVersion)
		// Label the first worker node to run the web server hosting the firmware file
		exutil.By("Add a label to the first worker node")
workerNode, err := exutil.GetClusterNodesBy(oc, "worker")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AddLabelToNode(oc, workerNode[0], "nginx-node", "true")
exutil.By("Create web-server to host the fw file")
BaseDir = exutil.FixturePath("testdata", "installer")
fwConfigmap := filepath.Join(BaseDir, "baremetal", "firmware-cm.yaml")
nginxFW := filepath.Join(BaseDir, "baremetal", "nginx-firmware.yaml")
exutil.ModifyYamlFileContent(fwConfigmap, []exutil.YamlReplace{
{
Path: "data.firmware_url",
Value: downloadUrl,
},
})
dcErr = oc.Run("create").Args("-f", fwConfigmap, "-n", testNamespace).Execute()
o.Expect(dcErr).NotTo(o.HaveOccurred())
dcErr = oc.Run("create").Args("-f", nginxFW, "-n", testNamespace).Execute()
o.Expect(dcErr).NotTo(o.HaveOccurred())
exutil.AssertPodToBeReady(oc, "nginx-pod", testNamespace)
exutil.By("Create ingress to access the iso file")
fileIngress := filepath.Join(BaseDir, "baremetal", "nginx-ingress.yaml")
nginxIngress := CopyToFile(fileIngress, "nginx-ingress.yaml")
clusterDomain, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ingress.config/cluster", "-o=jsonpath={.spec.domain}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
fwUrl := "fw." + clusterDomain
defer os.Remove(nginxIngress)
exutil.ModifyYamlFileContent(nginxIngress, []exutil.YamlReplace{
{
Path: "spec.rules.0.host",
Value: fwUrl,
},
})
IngErr := oc.Run("create").Args("-f", nginxIngress, "-n", testNamespace).Execute()
o.Expect(IngErr).NotTo(o.HaveOccurred())
exutil.By("Update HFC CRD")
component := "bmc"
hfcUrl := "http://" + fwUrl + "/" + fileName
patchConfig := fmt.Sprintf(`[{"op": "replace", "path": "/spec/updates", "value": [{"component":"%s","url":"%s"}]}]`, component, hfcUrl)
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("HostFirmwareComponents", "-n", machineAPINamespace, host, "--type=json", "-p", patchConfig).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
bmcUrl, _ := oc.AsAdmin().Run("get").Args("-n", machineAPINamespace, "hostfirmwarecomponents", host, "-o=jsonpath={.spec.updates[0].url}").Output()
o.Expect(bmcUrl).Should(o.Equal(hfcUrl))
defer func() {
patchConfig := `[{"op": "replace", "path": "/spec/updates", "value": []}]`
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("HostFirmwareComponents", "-n", machineAPINamespace, host, "--type=json", "-p", patchConfig).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.DeleteLabelFromNode(oc, workerNode[0], "nginx-node")
nodeHealthErr := clusterNodesHealthcheck(oc, 3000)
exutil.AssertWaitPollNoErr(nodeHealthErr, "Cluster did not recover in time!")
clusterOperatorHealthcheckErr := clusterOperatorHealthcheck(oc, 1500, dirname)
exutil.AssertWaitPollNoErr(clusterOperatorHealthcheckErr, "Cluster operators did not recover in time!")
}()
g.By("Reboot baremtalhost 'worker-01'")
out, err := oc.AsAdmin().WithoutNamespace().Run("annotate").Args("baremetalhosts", host, "reboot.metal3.io=", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).To(o.ContainSubstring("annotated"))
g.By("Waiting for the node to return to 'Ready' state")
// poll for node status to change to NotReady
err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", workerNode[1], "-o=jsonpath={.status.conditions[3].status}").Output()
if err != nil || string(output) == "True" {
e2e.Logf("Node is available, status: %s. Trying again", output)
return false, nil
}
if string(output) == "Unknown" {
e2e.Logf("Node is Ready, status: %s", output)
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "Node did not change state as expected")
nodeHealthErr := clusterNodesHealthcheck(oc, 3000)
exutil.AssertWaitPollNoErr(nodeHealthErr, "Cluster did not recover in time!")
clusterOperatorHealthcheckErr := clusterOperatorHealthcheck(oc, 1500, dirname)
exutil.AssertWaitPollNoErr(clusterOperatorHealthcheckErr, "Cluster operators did not recover in time!")
currentVersion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("HostFirmwareComponents", "-n", machineAPINamespace, host, "-o=jsonpath={.status.components[1].currentVersion}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(currentVersion).ShouldNot(o.Equal(initialVersion))
})
})
| package baremetal | ||||
test case | openshift/openshift-tests-private | 0eadb7a9-86b9-4ca6-96fe-ecad1a46e474 | Author:jhajyahy-Longduration-NonPreRelease-Medium-75430-Update host FW via HostFirmwareComponents CRD [Disruptive] | ['"fmt"', '"os"', '"path/filepath"', '"strconv"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/host_fw_components.go | g.It("Author:jhajyahy-Longduration-NonPreRelease-Medium-75430-Update host FW via HostFirmwareComponents CRD [Disruptive]", func() {
dirname = "OCP-75430.log"
host, getBmhErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("bmh", "-n", machineAPINamespace, "-o=jsonpath={.items[4].metadata.name}").Output()
o.Expect(getBmhErr).NotTo(o.HaveOccurred(), "Failed to get bmh name")
vendor, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("bmh", "-n", machineAPINamespace, host, "-o=jsonpath={.status.hardware.firmware.bios.vendor}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
initialVersion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("HostFirmwareComponents", "-n", machineAPINamespace, host, "-o=jsonpath={.status.components[1].currentVersion}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
testNamespace := oc.Namespace()
downloadUrl, fileName := buildFirmwareURL(vendor, initialVersion)
		// Label the first worker node to run the web server hosting the firmware file
		exutil.By("Add a label to the first worker node")
workerNode, err := exutil.GetClusterNodesBy(oc, "worker")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AddLabelToNode(oc, workerNode[0], "nginx-node", "true")
exutil.By("Create web-server to host the fw file")
BaseDir := exutil.FixturePath("testdata", "installer")
fwConfigmap := filepath.Join(BaseDir, "baremetal", "firmware-cm.yaml")
nginxFW := filepath.Join(BaseDir, "baremetal", "nginx-firmware.yaml")
exutil.ModifyYamlFileContent(fwConfigmap, []exutil.YamlReplace{
{
Path: "data.firmware_url",
Value: downloadUrl,
},
})
dcErr := oc.Run("create").Args("-f", fwConfigmap, "-n", testNamespace).Execute()
o.Expect(dcErr).NotTo(o.HaveOccurred())
dcErr = oc.Run("create").Args("-f", nginxFW, "-n", testNamespace).Execute()
o.Expect(dcErr).NotTo(o.HaveOccurred())
exutil.AssertPodToBeReady(oc, "nginx-pod", testNamespace)
exutil.By("Create ingress to access the iso file")
fileIngress := filepath.Join(BaseDir, "baremetal", "nginx-ingress.yaml")
nginxIngress := CopyToFile(fileIngress, "nginx-ingress.yaml")
clusterDomain, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ingress.config/cluster", "-o=jsonpath={.spec.domain}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
fwUrl := "fw." + clusterDomain
defer os.Remove(nginxIngress)
exutil.ModifyYamlFileContent(nginxIngress, []exutil.YamlReplace{
{
Path: "spec.rules.0.host",
Value: fwUrl,
},
})
IngErr := oc.Run("create").Args("-f", nginxIngress, "-n", testNamespace).Execute()
o.Expect(IngErr).NotTo(o.HaveOccurred())
exutil.By("Update HFC CRD")
component := "bmc"
hfcUrl := "http://" + fwUrl + "/" + fileName
patchConfig := fmt.Sprintf(`[{"op": "replace", "path": "/spec/updates", "value": [{"component":"%s","url":"%s"}]}]`, component, hfcUrl)
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("HostFirmwareComponents", "-n", machineAPINamespace, host, "--type=json", "-p", patchConfig).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
bmcUrl, _ := oc.AsAdmin().Run("get").Args("-n", machineAPINamespace, "hostfirmwarecomponents", host, "-o=jsonpath={.spec.updates[0].url}").Output()
o.Expect(bmcUrl).Should(o.Equal(hfcUrl))
defer func() {
patchConfig := `[{"op": "replace", "path": "/spec/updates", "value": []}]`
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("HostFirmwareComponents", "-n", machineAPINamespace, host, "--type=json", "-p", patchConfig).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.DeleteLabelFromNode(oc, workerNode[0], "nginx-node")
nodeHealthErr := clusterNodesHealthcheck(oc, 1500)
exutil.AssertWaitPollNoErr(nodeHealthErr, "Cluster did not recover in time!")
clusterOperatorHealthcheckErr := clusterOperatorHealthcheck(oc, 1500, dirname)
exutil.AssertWaitPollNoErr(clusterOperatorHealthcheckErr, "Cluster operators did not recover in time!")
}()
exutil.By("Get machine name of host")
machine, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("bmh", "-n", machineAPINamespace, host, "-o=jsonpath={.spec.consumerRef.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// Get the origin number of replicas
machineSet, cmdErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("machineset", "-n", machineAPINamespace, "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(cmdErr).NotTo(o.HaveOccurred())
originReplicasStr, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("machineset", machineSet, "-n", machineAPINamespace, "-o=jsonpath={.spec.replicas}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Annotate worker-01 machine for deletion")
_, err = oc.AsAdmin().WithoutNamespace().Run("annotate").Args("machine", machine, "machine.openshift.io/cluster-api-delete-machine=yes", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Scale down machineset")
originReplicas, err := strconv.Atoi(originReplicasStr)
o.Expect(err).NotTo(o.HaveOccurred())
newReplicas := originReplicas - 1
_, err = oc.AsAdmin().WithoutNamespace().Run("scale").Args("machineset", machineSet, "-n", machineAPINamespace, fmt.Sprintf("--replicas=%d", newReplicas)).Output()
o.Expect(err).NotTo(o.HaveOccurred())
waitForBMHState(oc, host, "available")
defer func() {
currentReplicasStr, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("machineset", machineSet, "-n", machineAPINamespace, "-o=jsonpath={.spec.replicas}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if currentReplicasStr != originReplicasStr {
_, err := oc.AsAdmin().WithoutNamespace().Run("scale").Args("machineset", machineSet, "-n", machineAPINamespace, fmt.Sprintf("--replicas=%s", originReplicasStr)).Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeHealthErr := clusterNodesHealthcheck(oc, 1500)
exutil.AssertWaitPollNoErr(nodeHealthErr, "Cluster did not recover in time!")
clusterOperatorHealthcheckErr := clusterOperatorHealthcheck(oc, 1500, dirname)
exutil.AssertWaitPollNoErr(clusterOperatorHealthcheckErr, "Cluster operators did not recover in time!")
}
}()
exutil.By("Scale up machineset")
_, err = oc.AsAdmin().WithoutNamespace().Run("scale").Args("machineset", machineSet, "-n", machineAPINamespace, fmt.Sprintf("--replicas=%s", originReplicasStr)).Output()
o.Expect(err).NotTo(o.HaveOccurred())
waitForBMHState(oc, host, "provisioned")
nodeHealthErr := clusterNodesHealthcheck(oc, 1500)
exutil.AssertWaitPollNoErr(nodeHealthErr, "Cluster did not recover in time!")
clusterOperatorHealthcheckErr := clusterOperatorHealthcheck(oc, 1500, dirname)
exutil.AssertWaitPollNoErr(clusterOperatorHealthcheckErr, "Cluster operators did not recover in time!")
currentVersion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("HostFirmwareComponents", "-n", machineAPINamespace, host, "-o=jsonpath={.status.components[1].currentVersion}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(currentVersion).ShouldNot(o.Equal(initialVersion))
}) | |||||
test case | openshift/openshift-tests-private | a00875b4-2113-44a9-94e2-b07652b2a91d | Author:jhajyahy-Longduration-NonPreRelease-Medium-77676-DAY2 Update HFS via HostUpdatePolicy CRD [Disruptive] | ['"fmt"', '"path/filepath"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/host_fw_components.go | g.It("Author:jhajyahy-Longduration-NonPreRelease-Medium-77676-DAY2 Update HFS via HostUpdatePolicy CRD [Disruptive]", func() {
dirname = "OCP-77676.log"
host, getBmhErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("bmh", "-n", machineAPINamespace, "-o=jsonpath={.items[4].metadata.name}").Output()
o.Expect(getBmhErr).NotTo(o.HaveOccurred(), "Failed to get bmh name")
workerNode, err := exutil.GetClusterNodesBy(oc, "worker")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create host update policy")
BaseDir := exutil.FixturePath("testdata", "installer")
hostUpdatePolicy := filepath.Join(BaseDir, "baremetal", "host-update-policy.yaml")
exutil.ModifyYamlFileContent(hostUpdatePolicy, []exutil.YamlReplace{
{
Path: "metadata.name",
Value: host,
},
})
dcErr := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", hostUpdatePolicy, "-n", machineAPINamespace).Execute()
o.Expect(dcErr).NotTo(o.HaveOccurred())
defer func() {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("HostUpdatePolicy", "-n", machineAPINamespace, host).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
nodeHealthErr := clusterNodesHealthcheck(oc, 3000)
exutil.AssertWaitPollNoErr(nodeHealthErr, "Cluster did not recover in time!")
clusterOperatorHealthcheckErr := clusterOperatorHealthcheck(oc, 1500, dirname)
exutil.AssertWaitPollNoErr(clusterOperatorHealthcheckErr, "Cluster operators did not recover in time!")
}()
exutil.By("Update HFS setting based on vendor")
vendor, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("bmh", "-n", machineAPINamespace, host, "-o=jsonpath={.status.hardware.firmware.bios.vendor}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
hfs, value, err := getHfsByVendor(oc, vendor, machineAPINamespace, host)
o.Expect(err).NotTo(o.HaveOccurred())
patchConfig := fmt.Sprintf(`[{"op": "replace", "path": "/spec/settings/%s", "value": "%s"}]`, hfs, value)
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("hfs", "-n", machineAPINamespace, host, "--type=json", "-p", patchConfig).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
defer func() {
patchConfig := `[{"op": "replace", "path": "/spec/settings", "value": {}}]`
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("hfs", "-n", machineAPINamespace, host, "--type=json", "-p", patchConfig).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
}()
specModified, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("hfs", "-n", machineAPINamespace, host, fmt.Sprintf("-o=jsonpath={.spec.settings.%s}", hfs)).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(specModified).Should(o.Equal(value))
exutil.By("Reboot baremtalhost worker-01")
out, err := oc.AsAdmin().WithoutNamespace().Run("annotate").Args("baremetalhosts", host, "reboot.metal3.io=", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).To(o.ContainSubstring("annotated"))
exutil.By("Waiting for the node to return to 'Ready' state")
// poll for node status to change to NotReady
err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", workerNode[1], "-o=jsonpath={.status.conditions[3].status}").Output()
if err != nil || string(output) == "True" {
e2e.Logf("Node is available, status: %s. Trying again", output)
return false, nil
}
if string(output) == "Unknown" {
e2e.Logf("Node is Ready, status: %s", output)
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "Node did not change state as expected")
nodeHealthErr := clusterNodesHealthcheck(oc, 3000)
exutil.AssertWaitPollNoErr(nodeHealthErr, "Cluster did not recover in time!")
clusterOperatorHealthcheckErr := clusterOperatorHealthcheck(oc, 1500, dirname)
exutil.AssertWaitPollNoErr(clusterOperatorHealthcheckErr, "Cluster operators did not recover in time!")
exutil.By("Verify hfs setting was actually changed")
statusModified, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("hfs", "-n", machineAPINamespace, host, fmt.Sprintf("-o=jsonpath={.status.settings.%s}", hfs)).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(statusModified).Should(o.Equal(specModified))
}) | |||||
test case | openshift/openshift-tests-private | c80e3c45-ef73-4bd5-95e3-4dc3707e5749 | Author:jhajyahy-Longduration-NonPreRelease-Medium-78361-DAY2 Update host FW via HostFirmwareComponents CRD [Disruptive] | ['"fmt"', '"os"', '"path/filepath"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/host_fw_components.go | g.It("Author:jhajyahy-Longduration-NonPreRelease-Medium-78361-DAY2 Update host FW via HostFirmwareComponents CRD [Disruptive]", func() {
dirname = "OCP-78361.log"
host, getBmhErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("bmh", "-n", machineAPINamespace, "-o=jsonpath={.items[4].metadata.name}").Output()
o.Expect(getBmhErr).NotTo(o.HaveOccurred(), "Failed to get bmh name")
vendor, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("bmh", "-n", machineAPINamespace, host, "-o=jsonpath={.status.hardware.firmware.bios.vendor}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
initialVersion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("HostFirmwareComponents", "-n", machineAPINamespace, host, "-o=jsonpath={.status.components[1].currentVersion}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create host update policy")
BaseDir := exutil.FixturePath("testdata", "installer")
hostUpdatePolicy := filepath.Join(BaseDir, "baremetal", "host-update-policy.yaml")
exutil.ModifyYamlFileContent(hostUpdatePolicy, []exutil.YamlReplace{
{
Path: "metadata.name",
Value: host,
},
})
dcErr := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", hostUpdatePolicy, "-n", machineAPINamespace).Execute()
o.Expect(dcErr).NotTo(o.HaveOccurred())
defer func() {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("HostUpdatePolicy", "-n", machineAPINamespace, host).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
nodeHealthErr := clusterNodesHealthcheck(oc, 3000)
exutil.AssertWaitPollNoErr(nodeHealthErr, "Cluster did not recover in time!")
clusterOperatorHealthcheckErr := clusterOperatorHealthcheck(oc, 1500, dirname)
exutil.AssertWaitPollNoErr(clusterOperatorHealthcheckErr, "Cluster operators did not recover in time!")
}()
oc.SetupProject()
testNamespace := oc.Namespace()
downloadUrl, fileName := buildFirmwareURL(vendor, initialVersion)
		// Label the first worker node to run the web server hosting the firmware file
		exutil.By("Add a label to the first worker node")
workerNode, err := exutil.GetClusterNodesBy(oc, "worker")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.AddLabelToNode(oc, workerNode[0], "nginx-node", "true")
exutil.By("Create web-server to host the fw file")
BaseDir = exutil.FixturePath("testdata", "installer")
fwConfigmap := filepath.Join(BaseDir, "baremetal", "firmware-cm.yaml")
nginxFW := filepath.Join(BaseDir, "baremetal", "nginx-firmware.yaml")
exutil.ModifyYamlFileContent(fwConfigmap, []exutil.YamlReplace{
{
Path: "data.firmware_url",
Value: downloadUrl,
},
})
dcErr = oc.Run("create").Args("-f", fwConfigmap, "-n", testNamespace).Execute()
o.Expect(dcErr).NotTo(o.HaveOccurred())
dcErr = oc.Run("create").Args("-f", nginxFW, "-n", testNamespace).Execute()
o.Expect(dcErr).NotTo(o.HaveOccurred())
exutil.AssertPodToBeReady(oc, "nginx-pod", testNamespace)
exutil.By("Create ingress to access the iso file")
fileIngress := filepath.Join(BaseDir, "baremetal", "nginx-ingress.yaml")
nginxIngress := CopyToFile(fileIngress, "nginx-ingress.yaml")
clusterDomain, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ingress.config/cluster", "-o=jsonpath={.spec.domain}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
fwUrl := "fw." + clusterDomain
defer os.Remove(nginxIngress)
exutil.ModifyYamlFileContent(nginxIngress, []exutil.YamlReplace{
{
Path: "spec.rules.0.host",
Value: fwUrl,
},
})
IngErr := oc.Run("create").Args("-f", nginxIngress, "-n", testNamespace).Execute()
o.Expect(IngErr).NotTo(o.HaveOccurred())
exutil.By("Update HFC CRD")
component := "bmc"
hfcUrl := "http://" + fwUrl + "/" + fileName
patchConfig := fmt.Sprintf(`[{"op": "replace", "path": "/spec/updates", "value": [{"component":"%s","url":"%s"}]}]`, component, hfcUrl)
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("HostFirmwareComponents", "-n", machineAPINamespace, host, "--type=json", "-p", patchConfig).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
bmcUrl, _ := oc.AsAdmin().Run("get").Args("-n", machineAPINamespace, "hostfirmwarecomponents", host, "-o=jsonpath={.spec.updates[0].url}").Output()
o.Expect(bmcUrl).Should(o.Equal(hfcUrl))
defer func() {
patchConfig := `[{"op": "replace", "path": "/spec/updates", "value": []}]`
patchErr := oc.AsAdmin().WithoutNamespace().Run("patch").Args("HostFirmwareComponents", "-n", machineAPINamespace, host, "--type=json", "-p", patchConfig).Execute()
o.Expect(patchErr).NotTo(o.HaveOccurred())
exutil.DeleteLabelFromNode(oc, workerNode[0], "nginx-node")
nodeHealthErr := clusterNodesHealthcheck(oc, 3000)
exutil.AssertWaitPollNoErr(nodeHealthErr, "Cluster did not recover in time!")
clusterOperatorHealthcheckErr := clusterOperatorHealthcheck(oc, 1500, dirname)
exutil.AssertWaitPollNoErr(clusterOperatorHealthcheckErr, "Cluster operators did not recover in time!")
}()
g.By("Reboot baremtalhost 'worker-01'")
out, err := oc.AsAdmin().WithoutNamespace().Run("annotate").Args("baremetalhosts", host, "reboot.metal3.io=", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).To(o.ContainSubstring("annotated"))
g.By("Waiting for the node to return to 'Ready' state")
// poll for node status to change to NotReady
err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", workerNode[1], "-o=jsonpath={.status.conditions[3].status}").Output()
if err != nil || string(output) == "True" {
e2e.Logf("Node is available, status: %s. Trying again", output)
return false, nil
}
if string(output) == "Unknown" {
e2e.Logf("Node is Ready, status: %s", output)
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "Node did not change state as expected")
nodeHealthErr := clusterNodesHealthcheck(oc, 3000)
exutil.AssertWaitPollNoErr(nodeHealthErr, "Cluster did not recover in time!")
clusterOperatorHealthcheckErr := clusterOperatorHealthcheck(oc, 1500, dirname)
exutil.AssertWaitPollNoErr(clusterOperatorHealthcheckErr, "Cluster operators did not recover in time!")
currentVersion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("HostFirmwareComponents", "-n", machineAPINamespace, host, "-o=jsonpath={.status.components[1].currentVersion}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(currentVersion).ShouldNot(o.Equal(initialVersion))
}) | |||||
test | openshift/openshift-tests-private | 4185a436-9d8f-4f94-bc63-78c6dc41a7e9 | ironic_auth | import (
"encoding/base64"
"fmt"
"os/exec"
"regexp"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/ironic_auth.go | package baremetal
import (
"encoding/base64"
"fmt"
"os/exec"
"regexp"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
// var _ = g.Describe("[sig-baremetal] INSTALLER UPI for INSTALLER_GENERAL job on BareMetal", func() {
// defer g.GinkgoRecover()
// var (
// oc = exutil.NewCLI("baremetal-ironic-authentication", exutil.KubeConfigPath())
// )
// g.BeforeEach(func() {
// })
// g.AfterEach(func() {
// })
// // author: [email protected]
// g.It("Author:sgoveas--Medium-12345-example case", func() {
// })
// })
// var _ = g.Describe("[sig-baremetal] INSTALLER UPI for INSTALLER_DEDICATED job on BareMetal", func() {
// defer g.GinkgoRecover()
// var (
// oc = exutil.NewCLI("baremetal-ironic-authentication", exutil.KubeConfigPath())
// )
// g.BeforeEach(func() {
// })
// g.AfterEach(func() {
// })
// // author: [email protected]
// g.It("Author:sgoveas--Medium-12345-example case", func() {
// })
// })
var _ = g.Describe("[sig-baremetal] INSTALLER IPI for INSTALLER_GENERAL job on BareMetal", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("baremetal-ironic-authentication", exutil.KubeConfigPath())
iaasPlatform string
endpointIP []string
encodedUserPass string
)
g.BeforeEach(func() {
exutil.SkipForSNOCluster(oc)
iaasPlatform = exutil.CheckPlatform(oc)
if !(iaasPlatform == "baremetal") {
e2e.Logf("Cluster is: %s", iaasPlatform)
g.Skip("For Non-baremetal cluster , this is not supported!")
}
user := getUserFromSecret(oc, machineAPINamespace, "metal3-ironic-password")
pass := getPassFromSecret(oc, machineAPINamespace, "metal3-ironic-password")
encodedUserPass = base64.StdEncoding.EncodeToString([]byte(user + ":" + pass))
metal3Pod, err := oc.AsAdmin().Run("get").Args("-n", machineAPINamespace, "pods", "-l baremetal.openshift.io/cluster-baremetal-operator=metal3-state", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
endpoint, err := oc.AsAdmin().Run("exec").Args("-n", machineAPINamespace, metal3Pod, "-c", "metal3-ironic", "--", "cat", "/etc/ironic/ironic.conf").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
re := regexp.MustCompile(`public_endpoint\s*=\s*https://(\d+\.\d+\.\d+\.\d+:\d+)`)
endpointIP = re.FindStringSubmatch(endpoint)
})
// author: [email protected]
g.It("Author:jhajyahy-Medium-40655-An unauthenticated user can't do actions in the ironic-api when using --insecure flag with https", func() {
curlCmd := `curl -k -I -X get "https://%s/v1/nodes"`
formattedCmd := fmt.Sprintf(curlCmd, endpointIP[1])
out, cmdErr := exec.Command("bash", "-c", formattedCmd).Output()
o.Expect(cmdErr).ShouldNot(o.HaveOccurred())
o.Expect(out).Should(o.ContainSubstring("HTTP/1.1 401 Unauthorized"))
})
// author: [email protected]
g.It("Author:jhajyahy-Medium-40560-An unauthenticated user can't do actions in the ironic-api when using http", func() {
curlCmd := `curl -I -X get "http://%s/v1/nodes"`
formattedCmd := fmt.Sprintf(curlCmd, endpointIP[1])
out, cmdErr := exec.Command("bash", "-c", formattedCmd).Output()
o.Expect(cmdErr).Should(o.HaveOccurred())
o.Expect(out).ShouldNot(o.ContainSubstring("HTTP/1.1 200 OK"))
o.Expect(cmdErr.Error()).Should(o.ContainSubstring("exit status 52"))
})
// author: [email protected]
g.It("Author:jhajyahy-Medium-40561-An authenticated user can't do actions in the ironic-api when using http", func() {
curlCmd := `curl -I -X get --header "Authorization: Basic %s" "http://%s/v1/nodes"`
formattedCmd := fmt.Sprintf(curlCmd, encodedUserPass, endpointIP[1])
out, cmdErr := exec.Command("bash", "-c", formattedCmd).Output()
o.Expect(cmdErr).Should(o.HaveOccurred())
o.Expect(out).ShouldNot(o.ContainSubstring("HTTP/1.1 200 OK"))
o.Expect(cmdErr.Error()).Should(o.ContainSubstring("exit status 52"))
})
// author: [email protected]
g.It("Author:jhajyahy-Medium-40562-An authenticated user can do actions in the ironic-api when using --insecure flag with https", func() {
curlCmd := `curl -I -k -X get --header "Authorization: Basic %s" "https://%s/v1/nodes"`
formattedCmd := fmt.Sprintf(curlCmd, encodedUserPass, endpointIP[1])
out, cmdErr := exec.Command("bash", "-c", formattedCmd).Output()
o.Expect(cmdErr).ShouldNot(o.HaveOccurred())
o.Expect(out).Should(o.ContainSubstring("HTTP/1.1 200 OK"))
})
})
// var _ = g.Describe("[sig-baremetal] INSTALLER IPI for INSTALLER_DEDICATED job on BareMetal", func() {
// defer g.GinkgoRecover()
// var (
// oc = exutil.NewCLI("baremetal-ironic-authentication", exutil.KubeConfigPath())
// )
// g.BeforeEach(func() {
// })
// g.AfterEach(func() {
// })
// // author: [email protected]
// g.It("Author:sgoveas--Medium-12345-example case", func() {
// })
// })
| package baremetal | ||||
test case | openshift/openshift-tests-private | 6d17fbca-d0a4-44eb-8c48-767b9ca464e0 | Author:jhajyahy-Medium-40655-An unauthenticated user can't do actions in the ironic-api when using --insecure flag with https | ['"fmt"', '"os/exec"'] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/ironic_auth.go | g.It("Author:jhajyahy-Medium-40655-An unauthenticated user can't do actions in the ironic-api when using --insecure flag with https", func() {
curlCmd := `curl -k -I -X get "https://%s/v1/nodes"`
formattedCmd := fmt.Sprintf(curlCmd, endpointIP[1])
out, cmdErr := exec.Command("bash", "-c", formattedCmd).Output()
o.Expect(cmdErr).ShouldNot(o.HaveOccurred())
o.Expect(out).Should(o.ContainSubstring("HTTP/1.1 401 Unauthorized"))
}) | |||||
test case | openshift/openshift-tests-private | 849f1724-5b70-49aa-a73c-38a6d070db52 | Author:jhajyahy-Medium-40560-An unauthenticated user can't do actions in the ironic-api when using http | ['"fmt"', '"os/exec"'] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/ironic_auth.go | g.It("Author:jhajyahy-Medium-40560-An unauthenticated user can't do actions in the ironic-api when using http", func() {
curlCmd := `curl -I -X get "http://%s/v1/nodes"`
formattedCmd := fmt.Sprintf(curlCmd, endpointIP[1])
out, cmdErr := exec.Command("bash", "-c", formattedCmd).Output()
o.Expect(cmdErr).Should(o.HaveOccurred())
o.Expect(out).ShouldNot(o.ContainSubstring("HTTP/1.1 200 OK"))
o.Expect(cmdErr.Error()).Should(o.ContainSubstring("exit status 52"))
}) | |||||
test case | openshift/openshift-tests-private | e37d6acb-70ab-4268-aced-2eba650ba339 | Author:jhajyahy-Medium-40561-An authenticated user can't do actions in the ironic-api when using http | ['"fmt"', '"os/exec"'] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/ironic_auth.go | g.It("Author:jhajyahy-Medium-40561-An authenticated user can't do actions in the ironic-api when using http", func() {
curlCmd := `curl -I -X get --header "Authorization: Basic %s" "http://%s/v1/nodes"`
formattedCmd := fmt.Sprintf(curlCmd, encodedUserPass, endpointIP[1])
out, cmdErr := exec.Command("bash", "-c", formattedCmd).Output()
o.Expect(cmdErr).Should(o.HaveOccurred())
o.Expect(out).ShouldNot(o.ContainSubstring("HTTP/1.1 200 OK"))
o.Expect(cmdErr.Error()).Should(o.ContainSubstring("exit status 52"))
}) | |||||
test case | openshift/openshift-tests-private | 1d0d7f36-efa2-4935-b920-69e1fb6bcfa3 | Author:jhajyahy-Medium-40562-An authenticated user can do actions in the ironic-api when using --insecure flag with https | ['"fmt"', '"os/exec"'] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/ironic_auth.go | g.It("Author:jhajyahy-Medium-40562-An authenticated user can do actions in the ironic-api when using --insecure flag with https", func() {
curlCmd := `curl -I -k -X get --header "Authorization: Basic %s" "https://%s/v1/nodes"`
formattedCmd := fmt.Sprintf(curlCmd, encodedUserPass, endpointIP[1])
out, cmdErr := exec.Command("bash", "-c", formattedCmd).Output()
o.Expect(cmdErr).ShouldNot(o.HaveOccurred())
o.Expect(out).Should(o.ContainSubstring("HTTP/1.1 200 OK"))
}) | |||||
file | openshift/openshift-tests-private | 2141dd97-381c-4230-aead-7b7e492f6a01 | utils | import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/utils.go | package baremetal
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
const (
machineAPINamespace = "openshift-machine-api"
maxCpuUsageAllowed float64 = 90.0
minRequiredMemoryInBytes = 1000000000
clusterProfileDir = "CLUSTER_PROFILE_DIR"
proxyFile = "proxy"
)
type Response struct {
Status string `json:"status"`
Data struct {
ResultType string `json:"resultType"`
Result []struct {
Metric struct {
Instance string `json:"instance"`
} `json:"metric"`
Value []interface{} `json:"value"`
} `json:"result"`
} `json:"data"`
}
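// checkOperatorsRunning returns true only when every cluster operator reports Available=True and Degraded=False.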
func checkOperatorsRunning(oc *exutil.CLI) (bool, error) {
jpath := `{range .items[*]}{.metadata.name}:{.status.conditions[?(@.type=='Available')].status}{':'}{.status.conditions[?(@.type=='Degraded')].status}{'\n'}{end}`
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperators.config.openshift.io", "-o", "jsonpath="+jpath).Output()
if err != nil {
return false, fmt.Errorf("failed to execute 'oc get clusteroperators.config.openshift.io' command: %v", err)
}
lines := strings.Split(strings.TrimSpace(string(output)), "\n")
for _, line := range lines {
e2e.Logf("%s", line)
parts := strings.Split(line, ":")
		available := parts[1] == "True"
		notDegraded := parts[2] == "False"
		if !available || !notDegraded {
return false, nil
}
}
return true, nil
}
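// checkNodesRunning returns true only when every node in the cluster reports the Ready condition as True.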
func checkNodesRunning(oc *exutil.CLI) (bool, error) {
nodeNames, nodeErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-o=jsonpath={.items[*].metadata.name}").Output()
if nodeErr != nil {
return false, fmt.Errorf("failed to execute 'oc get nodes' command: %v", nodeErr)
}
nodes := strings.Fields(nodeNames)
e2e.Logf("\nNode Names are %v", nodeNames)
for _, node := range nodes {
nodeStatus, statusErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", node, "-o=jsonpath={.status.conditions[?(@.type=='Ready')].status}").Output()
if statusErr != nil {
return false, fmt.Errorf("failed to execute 'oc get nodes' command: %v", statusErr)
}
e2e.Logf("\nNode %s Status is %s\n", node, nodeStatus)
if nodeStatus != "True" {
return false, nil
}
}
return true, nil
}
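// waitForDeployStatus polls until the named deployment reports the expected Available condition status, failing the test on timeout.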
func waitForDeployStatus(oc *exutil.CLI, depName string, nameSpace string, depStatus string) {
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, true, func(context.Context) (bool, error) {
		statusOp, err := oc.AsAdmin().Run("get").Args("-n", nameSpace, "deployment", depName, "-o=jsonpath={.status.conditions[?(@.type=='Available')].status}").Output()
		if err != nil {
			return false, err
		}
		if strings.Contains(statusOp, depStatus) {
			e2e.Logf("Deployment %v state is %v", depName, depStatus)
			return true, nil
		}
		e2e.Logf("deployment %v is in state %v, trying again", depName, statusOp)
return false, nil
})
exutil.AssertWaitPollNoErr(err, "The test deployment job is not running")
}
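// getPodName returns the name of the first pod listed in the given namespace.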
func getPodName(oc *exutil.CLI, ns string) string {
podName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[0].metadata.name}", "-n", ns).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\nPod Name is %v", podName)
return podName
}
func getPodStatus(oc *exutil.CLI, namespace string, podName string) string {
podStatus, err := oc.AsAdmin().Run("get").Args("pod", "-n", namespace, podName, "-o=jsonpath={.status.phase}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The pod %s status is %q", podName, podStatus)
return podStatus
}
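// getNodeCpuUsage queries the in-cluster Prometheus with a URL-encoded PromQL expression that computes
// CPU utilization for the node as 100 minus the average idle rate over the sampling window.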
func getNodeCpuUsage(oc *exutil.CLI, node string, sampling_time int) float64 {
samplingTime := strconv.Itoa(sampling_time)
cpu_sampling := "node_cpu_seconds_total%20%7Binstance%3D%27" + node
cpu_sampling += "%27%2C%20mode%3D%27idle%27%7D%5B5" + samplingTime + "m%5D"
query := "query=100%20-%20(avg%20by%20(instance)(irate(" + cpu_sampling + "))%20*%20100)"
url := "http://localhost:9090/api/v1/query?" + query
jsonString, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", "openshift-monitoring", "-c", "prometheus", "prometheus-k8s-0", "--", "curl", "-s", url).Output()
o.Expect(err).NotTo(o.HaveOccurred())
var response Response
unmarshalErr := json.Unmarshal([]byte(jsonString), &response)
o.Expect(unmarshalErr).NotTo(o.HaveOccurred())
cpuUsage := response.Data.Result[0].Value[1].(string)
cpu_usage, err := strconv.ParseFloat(cpuUsage, 64)
o.Expect(err).NotTo(o.HaveOccurred())
return cpu_usage
}
func getClusterUptime(oc *exutil.CLI) (int, error) {
layout := "2006-01-02T15:04:05Z"
completionTime, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion", "-o=jsonpath={.items[*].status.history[*].completionTime}").Output()
returnTime, perr := time.Parse(layout, completionTime)
if perr != nil {
e2e.Logf("Error trying to parse uptime %s", perr)
return 0, perr
}
now := time.Now()
uptime := now.Sub(returnTime)
uptimeByMin := int(uptime.Minutes())
return uptimeByMin, nil
}
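// getNodeavailMem queries node_memory_MemAvailable_bytes for the node from the in-cluster Prometheus and returns the value in bytes.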
func getNodeavailMem(oc *exutil.CLI, node string) int {
query := "query=node_memory_MemAvailable_bytes%7Binstance%3D%27" + node + "%27%7D"
url := "http://localhost:9090/api/v1/query?" + query
jsonString, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", "openshift-monitoring", "-c", "prometheus", "prometheus-k8s-0", "--", "curl", "-s", url).Output()
o.Expect(err).NotTo(o.HaveOccurred())
var response Response
unmarshalErr := json.Unmarshal([]byte(jsonString), &response)
o.Expect(unmarshalErr).NotTo(o.HaveOccurred())
memUsage := response.Data.Result[0].Value[1].(string)
availableMem, err := strconv.Atoi(memUsage)
o.Expect(err).NotTo(o.HaveOccurred())
return availableMem
}
// make sure operator is not processing and degraded
func checkOperator(oc *exutil.CLI, operatorName string) (bool, error) {
output, err := oc.AsAdmin().Run("get").Args("clusteroperator", operatorName).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if matched, _ := regexp.MatchString("True.*False.*False", output); !matched {
e2e.Logf("clusteroperator %s is abnormal\n", operatorName)
return false, nil
}
return true, nil
}
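// waitForPodNotFound polls until the named pod no longer exists in the given namespace.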
func waitForPodNotFound(oc *exutil.CLI, podName string, nameSpace string) {
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, true, func(context.Context) (bool, error) {
out, err := oc.AsAdmin().Run("get").Args("-n", nameSpace, "pods", "-o=jsonpath={.items[*].metadata.name}").Output()
if err != nil {
return false, err
}
if !strings.Contains(out, podName) {
e2e.Logf("Pod %v still exists is", podName)
return true, nil
}
e2e.Logf("Pod %v exists, Trying again", podName)
return false, nil
})
exutil.AssertWaitPollNoErr(err, "The test deployment job is running")
}
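// getUserFromSecret returns the base64-decoded username field of the given secret (always read from the machine-api namespace).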
func getUserFromSecret(oc *exutil.CLI, namespace string, secretName string) string {
userbase64, pwderr := oc.AsAdmin().Run("get").Args("secrets", "-n", machineAPINamespace, secretName, "-o=jsonpath={.data.username}").Output()
o.Expect(pwderr).ShouldNot(o.HaveOccurred())
user, err := base64.StdEncoding.DecodeString(userbase64)
o.Expect(err).ShouldNot(o.HaveOccurred())
return string(user)
}
func getPassFromSecret(oc *exutil.CLI, namespace string, secretName string) string {
pwdbase64, pwderr := oc.AsAdmin().Run("get").Args("secrets", "-n", machineAPINamespace, secretName, "-o=jsonpath={.data.password}").Output()
o.Expect(pwderr).ShouldNot(o.HaveOccurred())
pwd, err := base64.StdEncoding.DecodeString(pwdbase64)
o.Expect(err).ShouldNot(o.HaveOccurred())
return string(pwd)
}
func CopyToFile(fromPath string, toFilename string) string {
// check if source file is regular file
srcFileStat, err := os.Stat(fromPath)
if err != nil {
e2e.Failf("get source file %s stat failed: %v", fromPath, err)
}
if !srcFileStat.Mode().IsRegular() {
e2e.Failf("source file %s is not a regular file", fromPath)
}
// open source file
	source, err := os.Open(fromPath)
	if err != nil {
		e2e.Failf("open source file %s failed: %v", fromPath, err)
	}
	defer source.Close()
	// open dest file
	saveTo := filepath.Join(e2e.TestContext.OutputDir, toFilename)
	dest, err := os.Create(saveTo)
	if err != nil {
		e2e.Failf("open destination file %s failed: %v", saveTo, err)
	}
	defer dest.Close()
// copy from source to dest
_, err = io.Copy(dest, source)
if err != nil {
e2e.Failf("copy file from %s to %s failed: %v", fromPath, saveTo, err)
}
return saveTo
}
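// waitForBMHState polls the BareMetalHost until its provisioning state matches the expected value.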
func waitForBMHState(oc *exutil.CLI, bmhName string, bmhStatus string) {
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 30*time.Minute, true, func(context.Context) (bool, error) {
statusOp, err := oc.AsAdmin().Run("get").Args("-n", machineAPINamespace, "bmh", bmhName, "-o=jsonpath={.status.provisioning.state}").Output()
if err != nil {
return false, err
}
if strings.Contains(statusOp, bmhStatus) {
e2e.Logf("BMH state %v is %v", bmhName, bmhStatus)
return true, nil
}
e2e.Logf("BMH %v state is %v, Trying again", bmhName, statusOp)
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The BMH state of %v is not as expected", bmhName))
}
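// waitForBMHDeletion polls until the named BareMetalHost is no longer listed in the machine-api namespace.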
func waitForBMHDeletion(oc *exutil.CLI, bmhName string) {
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 30*time.Minute, true, func(ctx context.Context) (bool, error) {
out, err := oc.AsAdmin().Run("get").Args("-n", machineAPINamespace, "bmh", "-o=jsonpath={.items[*].metadata.name}").Output()
if err != nil {
return false, err
}
if !strings.Contains(out, bmhName) {
e2e.Logf("bmh %v still exists is", bmhName)
return true, nil
}
e2e.Logf("bmh %v exists, Trying again", bmhName)
return false, nil
})
exutil.AssertWaitPollNoErr(err, "The BMH was not deleted as expected")
}
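// getBypathDeviceName returns the name of the first storage device reported in the BareMetalHost hardware inventory.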
func getBypathDeviceName(oc *exutil.CLI, bmhName string) string {
byPath, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("bmh", "-n", machineAPINamespace, bmhName, "-o=jsonpath={.status.hardware.storage[0].name}").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
return byPath
}
// clusterOperatorHealthcheck check abnormal operators
func clusterOperatorHealthcheck(oc *exutil.CLI, waitTime int, dirname string) error {
e2e.Logf("Check the abnormal operators")
errCo := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, time.Duration(waitTime)*time.Second, false, func(cxt context.Context) (bool, error) {
coLogFile, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co", "--no-headers").OutputToFile(dirname)
if err == nil {
cmd := fmt.Sprintf(`cat %v | grep -v '.True.*False.*False' || true`, coLogFile)
coLogs, err := exec.Command("bash", "-c", cmd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if len(coLogs) > 0 {
return false, nil
}
} else {
return false, nil
}
err = oc.AsAdmin().WithoutNamespace().Run("get").Args("co").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("No abnormality found in cluster operators...")
return true, nil
})
if errCo != nil {
err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
return errCo
}
// clusterNodesHealthcheck check abnormal nodes
func clusterNodesHealthcheck(oc *exutil.CLI, waitTime int) error {
errNode := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, time.Duration(waitTime)*time.Second, false, func(cxt context.Context) (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node").Output()
if err == nil {
if strings.Contains(output, "NotReady") || strings.Contains(output, "SchedulingDisabled") {
return false, nil
}
} else {
return false, nil
}
e2e.Logf("Nodes are normal...")
err = oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
return true, nil
})
if errNode != nil {
err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
return errNode
}
// checkNodeStatus
func checkNodeStatus(oc *exutil.CLI, pollIntervalSec time.Duration, pollDurationMinute time.Duration, nodeName string, nodeStatus string) error {
e2e.Logf("Check status of node %s", nodeName)
errNode := wait.PollUntilContextTimeout(context.Background(), pollIntervalSec, pollDurationMinute, false, func(ctx context.Context) (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", nodeName, "-o=jsonpath={.status.conditions[3].status}").Output()
if err != nil || string(output) != nodeStatus {
e2e.Logf("Node status: %s. Trying again", output)
return false, nil
}
if string(output) == nodeStatus {
e2e.Logf("Node status: %s", output)
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errNode, "Node did not change state as expected")
return errNode
}
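// buildFirmwareURL maps the BMC vendor and the currently installed firmware version to the download URL of a
// different firmware image, plus the file name under which the in-cluster web server is expected to serve it.
// Unsupported vendors or BMC versions skip the test.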
func buildFirmwareURL(vendor, currentVersion string) (string, string) {
var url, fileName string
iDRAC_71070 := "https://dl.dell.com/FOLDER11965413M/1/iDRAC_7.10.70.00_A00.exe"
iDRAC_71030 := "https://dl.dell.com/FOLDER11319105M/1/iDRAC_7.10.30.00_A00.exe"
ilo5_305 := "https://downloads.hpe.com/pub/softlib2/software1/fwpkg-ilo/p991377599/v247527/ilo5_305.fwpkg"
ilo5_302 := "https://downloads.hpe.com/pub/softlib2/software1/fwpkg-ilo/p991377599/v243854/ilo5_302.fwpkg"
ilo6_157 := "https://downloads.hpe.com/pub/softlib2/software1/fwpkg-ilo/p788720876/v247531/ilo6_160.fwpkg"
ilo6_160 := "https://downloads.hpe.com/pub/softlib2/software1/fwpkg-ilo/p788720876/v243858/ilo6_157.fwpkg"
switch vendor {
case "Dell Inc.":
fileName = "firmimgFIT.d9"
if currentVersion == "7.10.70.00" {
url = iDRAC_71030
} else if currentVersion == "7.10.30.00" {
url = iDRAC_71070
} else {
url = iDRAC_71070 // Default to 7.10.70.00
}
case "HPE":
// Extract the iLO version and assign the file name accordingly
if strings.Contains(currentVersion, "iLO 5") {
if currentVersion == "iLO 5 v3.05" {
url = ilo5_302
fileName = "ilo5_302.bin"
} else if currentVersion == "iLO 5 v3.02" {
url = ilo5_305
fileName = "ilo5_305.bin"
} else {
url = ilo5_305 // Default to v3.05
fileName = "ilo5_305.bin"
}
} else if strings.Contains(currentVersion, "iLO 6") {
if currentVersion == "iLO 6 v1.57" {
url = ilo6_160
fileName = "ilo6_160.bin"
} else if currentVersion == "iLO 6 v1.60" {
url = ilo6_157
fileName = "ilo6_157.bin"
} else {
url = ilo6_157 // Default to 1.57
fileName = "ilo6_157.bin"
}
} else {
g.Skip("Unsupported HPE BMC version")
}
default:
g.Skip("Unsupported vendor")
}
return url, fileName
}
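// setProxyEnv exports the HTTP(S) proxy environment variables from the cluster profile's proxy file,
// unless a shared proxy-conf.sh already provides them.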
func setProxyEnv() {
sharedProxy := filepath.Join(os.Getenv("SHARED_DIR"), "proxy-conf.sh")
if _, err := os.Stat(sharedProxy); err == nil {
e2e.Logf("proxy-conf.sh exists. Proxy environment variables are already set.")
return
}
proxyFilePath := filepath.Join(os.Getenv(clusterProfileDir), proxyFile)
if _, err := os.Stat(proxyFilePath); err == nil {
content, err := ioutil.ReadFile(proxyFilePath)
if err != nil {
e2e.Failf("Failed to read file: %v", err)
}
proxyValue := strings.TrimSpace(string(content))
proxyVars := []string{"HTTP_PROXY", "HTTPS_PROXY", "http_proxy", "https_proxy"}
for _, proxyVar := range proxyVars {
if err := os.Setenv(proxyVar, proxyValue); err != nil {
e2e.Failf("Failed to set %s: %v", proxyVar, err)
}
}
noProxyValue := "localhost,127.0.0.1"
os.Setenv("NO_PROXY", noProxyValue)
os.Setenv("no_proxy", noProxyValue)
e2e.Logf("Proxy environment variables are set.")
} else if os.IsNotExist(err) {
e2e.Failf("File does not exist at path: %s\n", proxyFilePath)
} else {
e2e.Failf("Error checking file: %v\n", err)
}
}
func unsetProxyEnv() {
sharedProxy := filepath.Join(os.Getenv("SHARED_DIR"), "proxy-conf.sh")
if _, err := os.Stat(sharedProxy); err == nil {
e2e.Logf("proxy-conf.sh exists. Not unsetting proxy enviornment variables.")
return
}
proxyVars := []string{"HTTP_PROXY", "HTTPS_PROXY", "http_proxy", "https_proxy", "NO_PROXY", "no_proxy"}
for _, proxyVar := range proxyVars {
err := os.Unsetenv(proxyVar)
if err != nil {
e2e.Failf("Failed to unset %s: %v", proxyVar, err)
}
}
e2e.Logf("Proxy environment variables are unset.")
}
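// getHfsByVendor picks a vendor-specific BIOS setting (LogicalProc for Dell, NetworkBootRetry for HPE),
// reads its current value from the HostFirmwareSettings status, and returns the setting name together with the toggled value.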
func getHfsByVendor(oc *exutil.CLI, vendor, machineAPINamespace, host string) (string, string, error) {
var hfs, value, currStatus string
var err error
switch vendor {
case "Dell Inc.":
hfs = "LogicalProc"
case "HPE":
hfs = "NetworkBootRetry"
default:
g.Skip("Unsupported vendor")
return "", "", nil
}
currStatus, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("hfs", "-n", machineAPINamespace, host, fmt.Sprintf("-o=jsonpath={.status.settings.%s}", hfs)).Output()
if err != nil {
return "", "", fmt.Errorf("failed to fetch current status for %s: %v", hfs, err)
}
if currStatus == "Enabled" {
value = "Disabled"
} else {
value = "Enabled"
}
return hfs, value, nil
}
| package baremetal | ||||
function | openshift/openshift-tests-private | ba98e184-919c-44a9-97d1-69c0e787e622 | checkOperatorsRunning | ['"fmt"', '"io"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/utils.go | func checkOperatorsRunning(oc *exutil.CLI) (bool, error) {
jpath := `{range .items[*]}{.metadata.name}:{.status.conditions[?(@.type=='Available')].status}{':'}{.status.conditions[?(@.type=='Degraded')].status}{'\n'}{end}`
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperators.config.openshift.io", "-o", "jsonpath="+jpath).Output()
if err != nil {
return false, fmt.Errorf("failed to execute 'oc get clusteroperators.config.openshift.io' command: %v", err)
}
lines := strings.Split(strings.TrimSpace(string(output)), "\n")
for _, line := range lines {
e2e.Logf("%s", line)
parts := strings.Split(line, ":")
		available := parts[1] == "True"
		notDegraded := parts[2] == "False"
		if !available || !notDegraded {
return false, nil
}
}
return true, nil
} | baremetal | ||||
function | openshift/openshift-tests-private | 9f94d8aa-c49b-4ff7-a2b8-2db138ded77f | checkNodesRunning | ['"fmt"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/utils.go | func checkNodesRunning(oc *exutil.CLI) (bool, error) {
nodeNames, nodeErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-o=jsonpath={.items[*].metadata.name}").Output()
if nodeErr != nil {
return false, fmt.Errorf("failed to execute 'oc get nodes' command: %v", nodeErr)
}
nodes := strings.Fields(nodeNames)
e2e.Logf("\nNode Names are %v", nodeNames)
for _, node := range nodes {
nodeStatus, statusErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", node, "-o=jsonpath={.status.conditions[?(@.type=='Ready')].status}").Output()
if statusErr != nil {
return false, fmt.Errorf("failed to execute 'oc get nodes' command: %v", statusErr)
}
e2e.Logf("\nNode %s Status is %s\n", node, nodeStatus)
if nodeStatus != "True" {
return false, nil
}
}
return true, nil
} | baremetal | ||||
function | openshift/openshift-tests-private | 06139cd4-d022-45f2-83f2-f36748edf262 | waitForDeployStatus | ['"context"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/utils.go | func waitForDeployStatus(oc *exutil.CLI, depName string, nameSpace string, depStatus string) {
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, true, func(context.Context) (bool, error) {
		statusOp, err := oc.AsAdmin().Run("get").Args("-n", nameSpace, "deployment", depName, "-o=jsonpath={.status.conditions[?(@.type=='Available')].status}").Output()
		if err != nil {
			return false, err
		}
		if strings.Contains(statusOp, depStatus) {
			e2e.Logf("Deployment %v state is %v", depName, depStatus)
			return true, nil
		}
		e2e.Logf("deployment %v is in state %v, trying again", depName, statusOp)
return false, nil
})
exutil.AssertWaitPollNoErr(err, "The test deployment job is not running")
} | baremetal | ||||
function | openshift/openshift-tests-private | 7206819f-910c-4622-8902-3f32460bfab1 | getPodName | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/utils.go | func getPodName(oc *exutil.CLI, ns string) string {
podName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[0].metadata.name}", "-n", ns).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\nPod Name is %v", podName)
return podName
} | baremetal | |||||
function | openshift/openshift-tests-private | 136fd5cc-e322-434e-aa96-7b9c07233601 | getPodStatus | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/utils.go | func getPodStatus(oc *exutil.CLI, namespace string, podName string) string {
podStatus, err := oc.AsAdmin().Run("get").Args("pod", "-n", namespace, podName, "-o=jsonpath={.status.phase}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The pod %s status is %q", podName, podStatus)
return podStatus
} | baremetal | |||||
function | openshift/openshift-tests-private | 92e16424-531e-4ecf-afdf-1e3d76b5d156 | getNodeCpuUsage | ['"encoding/json"', '"os/exec"', '"strconv"'] | ['Response'] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/utils.go | func getNodeCpuUsage(oc *exutil.CLI, node string, sampling_time int) float64 {
samplingTime := strconv.Itoa(sampling_time)
cpu_sampling := "node_cpu_seconds_total%20%7Binstance%3D%27" + node
cpu_sampling += "%27%2C%20mode%3D%27idle%27%7D%5B5" + samplingTime + "m%5D"
query := "query=100%20-%20(avg%20by%20(instance)(irate(" + cpu_sampling + "))%20*%20100)"
url := "http://localhost:9090/api/v1/query?" + query
jsonString, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", "openshift-monitoring", "-c", "prometheus", "prometheus-k8s-0", "--", "curl", "-s", url).Output()
o.Expect(err).NotTo(o.HaveOccurred())
var response Response
unmarshalErr := json.Unmarshal([]byte(jsonString), &response)
o.Expect(unmarshalErr).NotTo(o.HaveOccurred())
cpuUsage := response.Data.Result[0].Value[1].(string)
cpu_usage, err := strconv.ParseFloat(cpuUsage, 64)
o.Expect(err).NotTo(o.HaveOccurred())
return cpu_usage
} | baremetal | |||
function | openshift/openshift-tests-private | 53172bdb-89ad-40c0-8d6a-8458d0f1f298 | getClusterUptime | ['"time"'] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/utils.go | func getClusterUptime(oc *exutil.CLI) (int, error) {
layout := "2006-01-02T15:04:05Z"
completionTime, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion", "-o=jsonpath={.items[*].status.history[*].completionTime}").Output()
returnTime, perr := time.Parse(layout, completionTime)
if perr != nil {
e2e.Logf("Error trying to parse uptime %s", perr)
return 0, perr
}
now := time.Now()
uptime := now.Sub(returnTime)
uptimeByMin := int(uptime.Minutes())
return uptimeByMin, nil
} | baremetal | ||||
function | openshift/openshift-tests-private | 4a2a5b8a-f777-4c90-bd1e-cc902865f443 | getNodeavailMem | ['"encoding/json"', '"os/exec"', '"strconv"'] | ['Response'] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/utils.go | func getNodeavailMem(oc *exutil.CLI, node string) int {
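// The URL-encoded query decodes to the PromQL expression
//   node_memory_MemAvailable_bytes{instance='<node>'}
// and the returned value is the available memory in bytes.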
query := "query=node_memory_MemAvailable_bytes%7Binstance%3D%27" + node + "%27%7D"
url := "http://localhost:9090/api/v1/query?" + query
jsonString, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", "openshift-monitoring", "-c", "prometheus", "prometheus-k8s-0", "--", "curl", "-s", url).Output()
o.Expect(err).NotTo(o.HaveOccurred())
var response Response
unmarshalErr := json.Unmarshal([]byte(jsonString), &response)
o.Expect(unmarshalErr).NotTo(o.HaveOccurred())
memUsage := response.Data.Result[0].Value[1].(string)
availableMem, err := strconv.Atoi(memUsage)
o.Expect(err).NotTo(o.HaveOccurred())
return availableMem
} | baremetal | |||
function | openshift/openshift-tests-private | 00e62912-2307-4a23-b902-1c040b69b9af | checkOperator | ['"regexp"'] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/utils.go | func checkOperator(oc *exutil.CLI, operatorName string) (bool, error) {
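// A healthy clusteroperator row reads "True False False" for the
// AVAILABLE/PROGRESSING/DEGRADED columns; anything else is treated as abnormal.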
output, err := oc.AsAdmin().Run("get").Args("clusteroperator", operatorName).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if matched, _ := regexp.MatchString("True.*False.*False", output); !matched {
e2e.Logf("clusteroperator %s is abnormal\n", operatorName)
return false, nil
}
return true, nil
} | baremetal | ||||
function | openshift/openshift-tests-private | 1909cc49-be6d-4ed9-994d-74dfeb7ed25b | waitForPodNotFound | ['"context"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/utils.go | func waitForPodNotFound(oc *exutil.CLI, podName string, nameSpace string) {
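// Polls until no pod name in the namespace contains podName, i.e. the pod has been deleted.
// Hypothetical usage: waitForPodNotFound(oc, "example-pod", "openshift-machine-api")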
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, true, func(context.Context) (bool, error) {
out, err := oc.AsAdmin().Run("get").Args("-n", nameSpace, "pods", "-o=jsonpath={.items[*].metadata.name}").Output()
if err != nil {
return false, err
}
if !strings.Contains(out, podName) {
e2e.Logf("Pod %v still exists is", podName)
return true, nil
}
e2e.Logf("Pod %v exists, Trying again", podName)
return false, nil
})
exutil.AssertWaitPollNoErr(err, "The test deployment job is running")
} | baremetal | ||||
function | openshift/openshift-tests-private | 9ecaa7e2-a27b-4971-89ea-12dcd9a27449 | getUserFromSecret | ['"encoding/base64"'] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/utils.go | func getUserFromSecret(oc *exutil.CLI, namespace string, secretName string) string {
userbase64, pwderr := oc.AsAdmin().Run("get").Args("secrets", "-n", machineAPINamespace, secretName, "-o=jsonpath={.data.username}").Output()
o.Expect(pwderr).ShouldNot(o.HaveOccurred())
user, err := base64.StdEncoding.DecodeString(userbase64)
o.Expect(err).ShouldNot(o.HaveOccurred())
return string(user)
} | baremetal | ||||
function | openshift/openshift-tests-private | 317f47ae-9710-432a-96fc-781b872dbce0 | getPassFromSecret | ['"encoding/base64"'] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/utils.go | func getPassFromSecret(oc *exutil.CLI, namespace string, secretName string) string {
pwdbase64, pwderr := oc.AsAdmin().Run("get").Args("secrets", "-n", machineAPINamespace, secretName, "-o=jsonpath={.data.password}").Output()
o.Expect(pwderr).ShouldNot(o.HaveOccurred())
pwd, err := base64.StdEncoding.DecodeString(pwdbase64)
o.Expect(err).ShouldNot(o.HaveOccurred())
return string(pwd)
} | baremetal | ||||
function | openshift/openshift-tests-private | bf916b7b-484f-473c-80a9-fa08d60d1302 | CopyToFile | ['"io"', '"os"', '"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/utils.go | func CopyToFile(fromPath string, toFilename string) string {
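// Copies a regular file into the e2e output directory and returns the destination path.
// Hypothetical usage: saved := CopyToFile("/tmp/kubeconfig", "kubeconfig-backup")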
// check if source file is regular file
srcFileStat, err := os.Stat(fromPath)
if err != nil {
e2e.Failf("get source file %s stat failed: %v", fromPath, err)
}
if !srcFileStat.Mode().IsRegular() {
e2e.Failf("source file %s is not a regular file", fromPath)
}
// open source file
source, err := os.Open(fromPath)
if err != nil {
e2e.Failf("open source file %s failed: %v", fromPath, err)
}
defer source.Close()
// open dest file
saveTo := filepath.Join(e2e.TestContext.OutputDir, toFilename)
dest, err := os.Create(saveTo)
if err != nil {
e2e.Failf("open destination file %s failed: %v", saveTo, err)
}
defer dest.Close()
// copy from source to dest
_, err = io.Copy(dest, source)
if err != nil {
e2e.Failf("copy file from %s to %s failed: %v", fromPath, saveTo, err)
}
return saveTo
} | baremetal | ||||
function | openshift/openshift-tests-private | db3ec2fe-6964-475f-8476-e2beb958d390 | waitForBMHState | ['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/utils.go | func waitForBMHState(oc *exutil.CLI, bmhName string, bmhStatus string) {
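// Polls the BareMetalHost provisioning state (up to 30 minutes) until it contains bmhStatus.
// Hypothetical usage: waitForBMHState(oc, "worker-0", "provisioned")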
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 30*time.Minute, true, func(context.Context) (bool, error) {
statusOp, err := oc.AsAdmin().Run("get").Args("-n", machineAPINamespace, "bmh", bmhName, "-o=jsonpath={.status.provisioning.state}").Output()
if err != nil {
return false, err
}
if strings.Contains(statusOp, bmhStatus) {
e2e.Logf("BMH state %v is %v", bmhName, bmhStatus)
return true, nil
}
e2e.Logf("BMH %v state is %v, Trying again", bmhName, statusOp)
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The BMH state of %v is not as expected", bmhName))
} | baremetal | ||||
function | openshift/openshift-tests-private | 3b7cff58-1c8e-459f-b6c2-15d159f03c5b | waitForBMHDeletion | ['"context"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/utils.go | func waitForBMHDeletion(oc *exutil.CLI, bmhName string) {
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 30*time.Minute, true, func(ctx context.Context) (bool, error) {
out, err := oc.AsAdmin().Run("get").Args("-n", machineAPINamespace, "bmh", "-o=jsonpath={.items[*].metadata.name}").Output()
if err != nil {
return false, err
}
if !strings.Contains(out, bmhName) {
e2e.Logf("bmh %v still exists is", bmhName)
return true, nil
}
e2e.Logf("bmh %v exists, Trying again", bmhName)
return false, nil
})
exutil.AssertWaitPollNoErr(err, "The BMH was not deleted as expected")
} | baremetal | ||||
function | openshift/openshift-tests-private | 67a6292f-80fc-4a84-9dac-ce36bb7ab426 | getBypathDeviceName | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/utils.go | func getBypathDeviceName(oc *exutil.CLI, bmhName string) string {
byPath, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("bmh", "-n", machineAPINamespace, bmhName, "-o=jsonpath={.status.hardware.storage[0].name}").Output()
o.Expect(err).ShouldNot(o.HaveOccurred())
return byPath
} | baremetal | |||||
function | openshift/openshift-tests-private | 5760732d-c522-4907-9291-f880e812d556 | clusterOperatorHealthcheck | ['"context"', '"fmt"', '"os/exec"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/utils.go | func clusterOperatorHealthcheck(oc *exutil.CLI, waitTime int, dirname string) error {
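// The grep below drops healthy rows ("True False False"); any remaining output means at
// least one clusteroperator is unavailable, progressing, or degraded, so polling continues.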
e2e.Logf("Check the abnormal operators")
errCo := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, time.Duration(waitTime)*time.Second, false, func(cxt context.Context) (bool, error) {
coLogFile, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co", "--no-headers").OutputToFile(dirname)
if err == nil {
cmd := fmt.Sprintf(`cat %v | grep -v '.True.*False.*False' || true`, coLogFile)
coLogs, err := exec.Command("bash", "-c", cmd).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if len(coLogs) > 0 {
return false, nil
}
} else {
return false, nil
}
err = oc.AsAdmin().WithoutNamespace().Run("get").Args("co").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("No abnormality found in cluster operators...")
return true, nil
})
if errCo != nil {
err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
return errCo
} | baremetal | ||||
function | openshift/openshift-tests-private | 183d1d7b-ba14-4eac-be56-8ba7ce7b0c62 | clusterNodesHealthcheck | ['"context"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/utils.go | func clusterNodesHealthcheck(oc *exutil.CLI, waitTime int) error {
errNode := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, time.Duration(waitTime)*time.Second, false, func(cxt context.Context) (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node").Output()
if err == nil {
if strings.Contains(output, "NotReady") || strings.Contains(output, "SchedulingDisabled") {
return false, nil
}
} else {
return false, nil
}
e2e.Logf("Nodes are normal...")
err = oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
return true, nil
})
if errNode != nil {
err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
return errNode
} | baremetal | ||||
function | openshift/openshift-tests-private | f527d3dd-4d24-494a-b241-21de3b4298cf | checkNodeStatus | ['"context"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/utils.go | func checkNodeStatus(oc *exutil.CLI, pollIntervalSec time.Duration, pollDurationMinute time.Duration, nodeName string, nodeStatus string) error {
e2e.Logf("Check status of node %s", nodeName)
errNode := wait.PollUntilContextTimeout(context.Background(), pollIntervalSec, pollDurationMinute, false, func(ctx context.Context) (bool, error) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", nodeName, "-o=jsonpath={.status.conditions[?(@.type=='Ready')].status}").Output()
if err != nil || output != nodeStatus {
e2e.Logf("Node status: %s. Trying again", output)
return false, nil
}
e2e.Logf("Node status: %s", output)
return true, nil
})
exutil.AssertWaitPollNoErr(errNode, "Node did not change state as expected")
return errNode
} | baremetal | ||||
function | openshift/openshift-tests-private | e96ed505-48ec-4f96-a3ba-beadb17e5f9b | buildFirmwareURL | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/utils.go | func buildFirmwareURL(vendor, currentVersion string) (string, string) {
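// Returns a firmware image URL and target file name for a version other than the one
// currently installed, so the caller can always trigger a firmware update. Only the
// hard-coded vendor/version combinations below are supported; anything else skips the test.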
var url, fileName string
iDRAC_71070 := "https://dl.dell.com/FOLDER11965413M/1/iDRAC_7.10.70.00_A00.exe"
iDRAC_71030 := "https://dl.dell.com/FOLDER11319105M/1/iDRAC_7.10.30.00_A00.exe"
ilo5_305 := "https://downloads.hpe.com/pub/softlib2/software1/fwpkg-ilo/p991377599/v247527/ilo5_305.fwpkg"
ilo5_302 := "https://downloads.hpe.com/pub/softlib2/software1/fwpkg-ilo/p991377599/v243854/ilo5_302.fwpkg"
ilo6_157 := "https://downloads.hpe.com/pub/softlib2/software1/fwpkg-ilo/p788720876/v247531/ilo6_160.fwpkg"
ilo6_160 := "https://downloads.hpe.com/pub/softlib2/software1/fwpkg-ilo/p788720876/v243858/ilo6_157.fwpkg"
switch vendor {
case "Dell Inc.":
fileName = "firmimgFIT.d9"
if currentVersion == "7.10.70.00" {
url = iDRAC_71030
} else if currentVersion == "7.10.30.00" {
url = iDRAC_71070
} else {
url = iDRAC_71070 // Default to 7.10.70.00
}
case "HPE":
// Extract the iLO version and assign the file name accordingly
if strings.Contains(currentVersion, "iLO 5") {
if currentVersion == "iLO 5 v3.05" {
url = ilo5_302
fileName = "ilo5_302.bin"
} else if currentVersion == "iLO 5 v3.02" {
url = ilo5_305
fileName = "ilo5_305.bin"
} else {
url = ilo5_305 // Default to v3.05
fileName = "ilo5_305.bin"
}
} else if strings.Contains(currentVersion, "iLO 6") {
if currentVersion == "iLO 6 v1.57" {
url = ilo6_160
fileName = "ilo6_160.bin"
} else if currentVersion == "iLO 6 v1.60" {
url = ilo6_157
fileName = "ilo6_157.bin"
} else {
url = ilo6_157 // Default to 1.57
fileName = "ilo6_157.bin"
}
} else {
g.Skip("Unsupported HPE BMC version")
}
default:
g.Skip("Unsupported vendor")
}
return url, fileName
} | baremetal | ||||
function | openshift/openshift-tests-private | 2c31f761-2d41-4383-8bff-7ab20e146ee9 | setProxyEnv | ['"io/ioutil"', '"os"', '"path/filepath"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/utils.go | func setProxyEnv() {
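// If proxy-conf.sh exists in SHARED_DIR the proxy is assumed to be configured already;
// otherwise the proxy value is read from the cluster profile directory and exported via
// the usual HTTP(S)_PROXY / NO_PROXY environment variables.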
sharedProxy := filepath.Join(os.Getenv("SHARED_DIR"), "proxy-conf.sh")
if _, err := os.Stat(sharedProxy); err == nil {
e2e.Logf("proxy-conf.sh exists. Proxy environment variables are already set.")
return
}
proxyFilePath := filepath.Join(os.Getenv(clusterProfileDir), proxyFile)
if _, err := os.Stat(proxyFilePath); err == nil {
content, err := ioutil.ReadFile(proxyFilePath)
if err != nil {
e2e.Failf("Failed to read file: %v", err)
}
proxyValue := strings.TrimSpace(string(content))
proxyVars := []string{"HTTP_PROXY", "HTTPS_PROXY", "http_proxy", "https_proxy"}
for _, proxyVar := range proxyVars {
if err := os.Setenv(proxyVar, proxyValue); err != nil {
e2e.Failf("Failed to set %s: %v", proxyVar, err)
}
}
noProxyValue := "localhost,127.0.0.1"
os.Setenv("NO_PROXY", noProxyValue)
os.Setenv("no_proxy", noProxyValue)
e2e.Logf("Proxy environment variables are set.")
} else if os.IsNotExist(err) {
e2e.Failf("File does not exist at path: %s\n", proxyFilePath)
} else {
e2e.Failf("Error checking file: %v\n", err)
}
} | baremetal | ||||
function | openshift/openshift-tests-private | 30739945-a294-4f7d-b0f3-5fa8cf222049 | unsetProxyEnv | ['"os"', '"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/utils.go | func unsetProxyEnv() {
sharedProxy := filepath.Join(os.Getenv("SHARED_DIR"), "proxy-conf.sh")
if _, err := os.Stat(sharedProxy); err == nil {
e2e.Logf("proxy-conf.sh exists. Not unsetting proxy enviornment variables.")
return
}
proxyVars := []string{"HTTP_PROXY", "HTTPS_PROXY", "http_proxy", "https_proxy", "NO_PROXY", "no_proxy"}
for _, proxyVar := range proxyVars {
err := os.Unsetenv(proxyVar)
if err != nil {
e2e.Failf("Failed to unset %s: %v", proxyVar, err)
}
}
e2e.Logf("Proxy environment variables are unset.")
} | baremetal | ||||
function | openshift/openshift-tests-private | 1596838d-1493-40b6-b005-c15b85e2f376 | getHfsByVendor | ['"fmt"'] | github.com/openshift/openshift-tests-private/test/extended/installer/baremetal/utils.go | func getHfsByVendor(oc *exutil.CLI, vendor, machineAPINamespace, host string) (string, string, error) {
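// Picks a vendor-specific HostFirmwareSettings attribute that is safe to toggle
// (LogicalProc on Dell, NetworkBootRetry on HPE) and returns it with the value flipped
// from its current state, so the caller can apply and verify a settings change.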
var hfs, value, currStatus string
var err error
switch vendor {
case "Dell Inc.":
hfs = "LogicalProc"
case "HPE":
hfs = "NetworkBootRetry"
default:
g.Skip("Unsupported vendor")
return "", "", nil
}
currStatus, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("hfs", "-n", machineAPINamespace, host, fmt.Sprintf("-o=jsonpath={.status.settings.%s}", hfs)).Output()
if err != nil {
return "", "", fmt.Errorf("failed to fetch current status for %s: %v", hfs, err)
}
if currStatus == "Enabled" {
value = "Disabled"
} else {
value = "Enabled"
}
return hfs, value, nil
} | baremetal | ||||
test | openshift/openshift-tests-private | abc11ef7-3a62-43d7-ba26-7a49dfb3b4fb | kata | import (
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"golang.org/x/exp/slices"
"golang.org/x/mod/semver"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/tidwall/gjson"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | // Package kata operator tests
package kata
import (
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"golang.org/x/exp/slices"
"golang.org/x/mod/semver"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/tidwall/gjson"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-kata] Kata", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("kata", exutil.KubeConfigPath())
testDataDir = exutil.FixturePath("testdata", "kata")
kcTemplate = filepath.Join(testDataDir, "kataconfig.yaml")
defaultDeployment = filepath.Join(testDataDir, "workload-deployment-securityContext.yaml")
defaultPod = filepath.Join(testDataDir, "workload-pod-securityContext.yaml")
subTemplate = filepath.Join(testDataDir, "subscription_template.yaml")
namespaceTemplate = filepath.Join(testDataDir, "namespace.yaml")
ogTemplate = filepath.Join(testDataDir, "operatorgroup.yaml")
redirectFile = filepath.Join(testDataDir, "ImageTag-DigestMirrorSet.yaml")
redirectType = "ImageTagMirrorSet"
redirectName = "kata-brew-registry"
clusterVersion string
cloudPlatform string
configmapExists bool
ocpMajorVer string
ocpMinorVer string
minorVer int
opNamespace = "openshift-sandboxed-containers-operator"
ppParam PeerpodParam
ppRuntimeClass = "kata-remote"
ppSecretName = "peer-pods-secret"
ppConfigMapName = "peer-pods-cm"
ppParamsLibvirtConfigMapName = "libvirt-podvm-image-cm"
secretTemplateAws = filepath.Join(testDataDir, "peer-pod-secret-aws.yaml")
secretTemplateLibvirt = filepath.Join(testDataDir, "peer-pod-secret-libvirt.yaml")
ppConfigMapTemplate string
ppAWSConfigMapTemplate = filepath.Join(testDataDir, "peer-pod-aws-cm-template.yaml")
ppAzureConfigMapTemplate = filepath.Join(testDataDir, "peer-pod-azure-cm-template.yaml")
ppLibvirtConfigMapTemplate = filepath.Join(testDataDir, "peer-pod-libvirt-cm-template.yaml")
ppParamsLibvirtConfigMapTemplate = filepath.Join(testDataDir, "peer-pods-param-libvirt-cm-template.yaml")
podAnnotatedTemplate = filepath.Join(testDataDir, "pod-annotations-template.yaml")
featureGatesFile = filepath.Join(testDataDir, "cc-feature-gates-cm.yaml")
kbsClientTemplate = filepath.Join(testDataDir, "kbs-client-template.yaml")
trusteeCosignedPodFile = filepath.Join(testDataDir, "trustee-cosigned-pod.yaml")
testrunConfigmapNs = "default"
testrunConfigmapName = "osc-config"
scratchRpmName string
trusteeRouteHost string
)
subscription := SubscriptionDescription{
subName: "sandboxed-containers-operator",
namespace: opNamespace,
catalogSourceName: "redhat-operators",
catalogSourceNamespace: "openshift-marketplace",
channel: "stable",
ipApproval: "Automatic",
operatorPackage: "sandboxed-containers-operator",
template: subTemplate,
}
kataconfig := KataconfigDescription{
name: "example-kataconfig",
template: kcTemplate,
logLevel: "info",
eligibility: false,
runtimeClassName: "kata",
enablePeerPods: false,
}
testrun := TestRunDescription{
checked: false,
operatorVer: "1.7.0",
catalogSourceName: subscription.catalogSourceName,
channel: subscription.channel,
redirectNeeded: false,
mustgatherImage: "registry.redhat.io/openshift-sandboxed-containers/osc-must-gather-rhel9:latest",
eligibility: kataconfig.eligibility,
labelSingleNode: false,
eligibleSingleNode: false,
runtimeClassName: kataconfig.runtimeClassName,
enablePeerPods: kataconfig.enablePeerPods,
enableGPU: false,
podvmImageUrl: "https://raw.githubusercontent.com/openshift/sandboxed-containers-operator/devel/config/peerpods/podvm/",
workloadImage: "quay.io/openshift/origin-hello-openshift",
installKataRPM: false,
workloadToTest: "kata",
trusteeCatalogSourcename: "redhat-operators",
trusteeUrl: "",
}
trusteeSubscription := SubscriptionDescription{
subName: "trustee-operator",
namespace: "trustee-operator-system",
catalogSourceName: testrun.trusteeCatalogSourcename,
catalogSourceNamespace: "openshift-marketplace",
channel: "stable",
ipApproval: "Automatic",
operatorPackage: "trustee-operator",
template: subTemplate,
}
g.BeforeEach(func() {
// Creating/deleting kataconfig reboots all worker node and extended-platform-tests may timeout.
// --------- AWS baremetal may take >20m per node ----------------
// add --timeout 70m
// tag with [Slow][Serial][Disruptive] when deleting/recreating kataconfig
var (
err error
msg string
)
// run once on startup to populate vars, create ns, og, label nodes
// always log cluster setup on each test
if testrun.checked {
e2e.Logf("\n Cluster: %v.%v on %v\n configmapExists %v\n testrun %v\n subscription %v\n kataconfig %v\n\n", ocpMajorVer, ocpMinorVer, cloudPlatform, configmapExists, testrun, subscription, kataconfig)
if scratchRpmName != "" {
e2e.Logf("Scratch rpm %v was installed", scratchRpmName)
}
} else {
cloudPlatform = getCloudProvider(oc)
clusterVersion, ocpMajorVer, ocpMinorVer, minorVer = getClusterVersion(oc)
configmapExists, err = getTestRunParameters(oc, &subscription, &kataconfig, &testrun, testrunConfigmapNs, testrunConfigmapName)
if err != nil {
// if there is an error, fail every test
e2e.Failf("ERROR: testrun configmap %v errors: %v\n%v %v", testrunConfigmapName, testrun, configmapExists, err)
}
// trusteeSubscription isn't passed into getTestRunParameters()
trusteeSubscription.catalogSourceName = testrun.trusteeCatalogSourcename
e2e.Logf("\n Cluster: %v.%v on %v\n configmapExists %v\n testrun %v\n subscription %v\n kataconfig %v\n\n", ocpMajorVer, ocpMinorVer, cloudPlatform, configmapExists, testrun, subscription, kataconfig)
testrun.checked = false // only set it true at the end
if testrun.redirectNeeded {
if ocpMajorVer == "4" && minorVer <= 12 {
redirectType = "ImageContentSourcePolicy"
redirectFile = filepath.Join(testDataDir, "ImageContentSourcePolicy-brew.yaml")
}
err = applyImageRedirect(oc, redirectFile, redirectType, redirectName)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("%v", err))
}
err = ensureNamespaceIsInstalled(oc, subscription.namespace, namespaceTemplate)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("%v", err))
err = ensureOperatorGroupIsInstalled(oc, subscription.namespace, ogTemplate)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("%v", err))
checkAndLabelCustomNodes(oc, testrun)
// o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("%v", err))
if kataconfig.eligibility {
labelEligibleNodes(oc, testrun)
// o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("%v", err))
}
testrun.checked = true
e2e.Logf("configmapExists %v\n testrun %v\n\n", configmapExists, testrun)
}
e2e.Logf("The current platform is %v. OCP %v.%v cluster version: %v", cloudPlatform, ocpMajorVer, ocpMinorVer, clusterVersion)
err = ensureOperatorIsSubscribed(oc, subscription, subTemplate)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("%v", err))
e2e.Logf("---------- subscription %v succeeded with channel %v %v", subscription.subName, subscription.channel, err)
err = checkKataconfigIsCreated(oc, subscription, kataconfig.name)
if err == nil {
msgSuccess := fmt.Sprintf("(2) subscription %v and kataconfig %v exists, skipping operator deployment", subscription.subName, kataconfig.name)
e2e.Logf(msgSuccess)
// kata is installed already
// have rpms been installed before we skip out of g.BeforeEach()?
if testrun.installKataRPM {
e2e.Logf("INFO Trying to install scratch rpm")
scratchRpmName, err = installKataContainerRPM(oc, &testrun)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("ERROR: could not rpm -Uvh /var/local/%v: %v", scratchRpmName, err))
// Its installed now
e2e.Logf("INFO Scratch rpm %v was installed", scratchRpmName)
testrun.installKataRPM = false
msg, err = oc.AsAdmin().Run("patch").Args("configmap", testrunConfigmapName, "-n", testrunConfigmapNs, "--type", "merge",
"--patch", "{\"data\":{\"installKataRPM\":\"false\"}}").Output()
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Error: Patching fails on %v with installKataRPM=false: %v %v", testrunConfigmapName, msg, err))
}
return
}
// REST OF THIS FUNC will be executed ONLY if kataconfig was not found
// should be ensurePeerPodSecrets & configmaps
//create peer pods secret and peer pods cm for OSC prev to 1.7.0
if kataconfig.enablePeerPods {
baseVer := strings.Split(testrun.operatorVer, "-")[0]
if strings.Contains(testrun.operatorVer, "1.6.0") || strings.Contains(testrun.operatorVer, "1.5.3") {
msg, err = createApplyPeerPodSecrets(oc, cloudPlatform, ppParam, opNamespace, ppSecretName, secretTemplateAws)
if err != nil {
err = fmt.Errorf("Cloud Credentials not found") // Generate a custom error
e2e.Failf("Cloud Credentials not found. Skipping test suite execution msg: %v , err: %v", msg, err)
}
} else if semver.Compare(baseVer, "v1.7.0") >= 0 && cloudPlatform == "libvirt" {
msg, err = createApplyPeerPodsParamLibvirtConfigMap(oc, cloudPlatform, ppParam, opNamespace, ppParamsLibvirtConfigMapName, ppParamsLibvirtConfigMapTemplate)
if err != nil {
err = fmt.Errorf("Libvirt configs not found") // Generate a custom error
e2e.Failf("Cloud Credentials not found. Skipping test suite execution msg: %v , err: %v", msg, err)
}
msg, err = createApplyPeerPodSecrets(oc, cloudPlatform, ppParam, opNamespace, ppSecretName, secretTemplateLibvirt)
if err != nil {
err = fmt.Errorf("Libvirt configs not found") // Generate a custom error
e2e.Failf("Cloud Credentials not found. Skipping test suite execution msg: %v , err: %v", msg, err)
}
}
switch cloudPlatform {
case "azure":
ppConfigMapTemplate = ppAzureConfigMapTemplate
case "aws":
ppConfigMapTemplate = ppAWSConfigMapTemplate
case "libvirt":
ppConfigMapTemplate = ppLibvirtConfigMapTemplate
default:
e2e.Failf("Cloud provider %v is not supported", cloudPlatform)
}
msg, err = createApplyPeerPodConfigMap(oc, cloudPlatform, ppParam, opNamespace, ppConfigMapName, ppConfigMapTemplate)
if err != nil {
e2e.Failf("peer-pods-cm NOT applied msg: %v , err: %v", msg, err)
} else if cloudPlatform == "azure" || cloudPlatform == "libvirt" {
err = createSSHPeerPodsKeys(oc, ppParam, cloudPlatform)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("%v", err))
}
if cloudPlatform == "aws" && (strings.Contains(testrun.operatorVer, "1.6.0") || strings.Contains(testrun.operatorVer, "1.5.3")) {
e2e.Logf("patch cm for DISABLECVM=true for OSC 1.5.3 and 1.6.0")
msg, err = oc.AsAdmin().Run("patch").Args("configmap", ppConfigMapName, "-n", opNamespace, "--type", "merge",
"--patch", "{\"data\":{\"DISABLECVM\":\"true\"}}").Output()
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Could not patch DISABLECVM=true \n error: %v %v", msg, err))
}
//new flow for GPU prior to image building job
if testrun.enableGPU {
cmName := cloudPlatform + "-podvm-image-cm"
//fix till CI will be fixed
//cmUrl := testrun.podvmImageUrl + cmName + ".yaml"
cmUrl := "https://raw.githubusercontent.com/openshift/sandboxed-containers-operator/devel/config/peerpods/podvm/" + cmName + ".yaml"
msg, err := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", cmUrl).Output()
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("issue applying podvm image configmap %v: %v, %v", cmUrl, msg, err))
patchPodvmEnableGPU(oc, opNamespace, cmName, "yes")
if cloudPlatform == "azure" {
msg, err = oc.AsAdmin().Run("patch").Args("configmap", cmName, "-n", opNamespace, "--type", "merge",
"--patch", "{\"data\":{\"IMAGE_GALLERY_NAME\":\"ginkgo"+getRandomString()+"\"}}").Output()
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Could not patch IMAGE_GALLERY_NAME\n error: %v %v", msg, err))
}
}
}
if testrun.workloadToTest == "coco" {
err = ensureFeatureGateIsApplied(oc, subscription, featureGatesFile)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("ERROR: could not apply osc-feature-gates cm: %v", err))
trusteeRouteHost, err = ensureTrusteeIsInstalled(oc, trusteeSubscription, namespaceTemplate, ogTemplate, subTemplate)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("ERROR: %v err: %v", trusteeRouteHost, err))
msg, err = configureTrustee(oc, trusteeSubscription, testDataDir, testrun.trusteeUrl)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("ERROR: configuring trustee: %v", err))
testrun.trusteeUrl = msg
e2e.Logf("INFO in-cluster TRUSTEE_HOST is %v.\nINFO The trusteeUrl to be used is %v", trusteeRouteHost, testrun.trusteeUrl)
err = ensureTrusteeUrlReturnIsValid(oc, kbsClientTemplate, testrun.trusteeUrl, "cmVzMXZhbDE=", trusteeSubscription.namespace)
if err != nil {
testrun.checked = false // fail all tests
}
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("ERROR: failing all tests \nERROR: %v", err))
patch := fmt.Sprintf("{\"data\":{\"AA_KBC_PARAMS\": \"cc_kbc::%v\"}}", testrun.trusteeUrl)
msg, err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("cm", "peer-pods-cm", "--type", "merge", "-p", patch, "-n", subscription.namespace).Output()
if err != nil {
e2e.Logf("WARNING: patching peer-pods-cm: %v %v", msg, err)
}
cocoDefaultSize := map[string]string{
"aws": "{\"data\":{\"PODVM_INSTANCE_TYPE\":\"m6a.large\"}}",
"azure": "{\"data\":{\"AZURE_INSTANCE_SIZE\":\"Standard_DC2as_v5\"}}",
}
msg, err = oc.AsAdmin().Run("patch").Args("configmap", ppConfigMapName, "-n", opNamespace, "--type", "merge",
"--patch", cocoDefaultSize[cloudPlatform]).Output()
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Could not patch default instance for coco \n error: %v %v", msg, err))
// oc set env ds/peerpodconfig-ctrl-caa-daemon -n openshift-sandboxed-containers-operator REBOOT="$(date)"
}
// should check kataconfig here & already have checked subscription
msg, err = ensureKataconfigIsCreated(oc, kataconfig, subscription)
e2e.Logf("---------- kataconfig %v create succeeded %v %v", kataconfig.name, msg, err)
// this should be a seperate day0 test for control pods
if kataconfig.enablePeerPods {
//TODO implement single function with the list of deployments for kata/peer pod/ coco
checkPeerPodControl(oc, opNamespace, podRunState)
}
// kata wasn't installed before so this isn't skipped
// Do rpms need installion?
if testrun.installKataRPM {
e2e.Logf("INFO Trying to install scratch rpm")
scratchRpmName, err = installKataContainerRPM(oc, &testrun)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("ERROR: could not install scratch %v failed: %v", scratchRpmName, err))
// Its installed now
e2e.Logf("INFO Scratch rpm %v was installed", scratchRpmName)
testrun.installKataRPM = false
msg, err = oc.AsAdmin().Run("patch").Args("configmap", testrunConfigmapName, "-n", testrunConfigmapNs, "--type", "merge",
"--patch", "{\"data\":{\"installKataRPM\":\"false\"}}").Output()
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Error: Patching fails on %v with installKataRPM=false: %v %v", testrunConfigmapName, msg, err))
}
})
g.It("Author:abhbaner-High-39499-Operator installation", func() {
g.By("Checking sandboxed-operator operator installation")
_, err := subscriptionIsFinished(oc, subscription)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("SUCCESSS - sandboxed-operator operator installed")
})
g.It("Author:abhbaner-High-43522-Common Kataconfig installation", func() {
g.Skip("test require structure rework")
g.By("Install Common kataconfig and verify it")
e2e.Logf("common kataconfig %v is installed", kataconfig.name)
err := checkKataconfigIsCreated(oc, subscription, kataconfig.name)
if err != nil {
e2e.Failf("ERROR: kataconfig install failed: %v", err)
}
/* kataconfig status changed so this does not work.
These checks should be moved to a function
nodeKataList := getAllKataNodes(oc, kataconfig.eligibility, subscription.namespace, featureLabel, customLabel)
o.Expect(len(nodeKataList) > 0).To(o.BeTrue())
nodeKataCount := fmt.Sprintf("%d", len(nodeKataList))
jsonKataStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("kataconfig", kataconfig.name, "-o=jsonpath={.status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
totalCount := gjson.Get(jsonKataStatus, "totalNodesCount").String()
o.Expect(totalCount).To(o.Equal(nodeKataCount))
completeCount := gjson.Get(jsonKataStatus, "installationStatus.completed.completedNodesCount").String()
o.Expect(totalCount).To(o.Equal(completeCount))
completededListCount := gjson.Get(jsonKataStatus, "installationStatus.completed.completedNodesList.#").String()
o.Expect(completededListCount == totalCount)
e2e.Logf("Completed nodes are %v", gjson.Get(jsonKataStatus, "installationStatus.completed.completedNodesList").String())
g.By("SUCCESSS - kataconfig installed and it's structure is verified")
*/
})
g.It("Author:tbuskey-High-66108-Version in operator CSV should match expected version", func() {
if !testrun.checked {
g.Skip("osc-config cm or OSCSOPERATORVER are not set so there is no expected version to compare")
}
var (
err error
csvName string
csvVersion string
)
csvName, err = oc.AsAdmin().Run("get").Args("sub", subscription.subName, "-n", subscription.namespace, "-o=jsonpath={.status.installedCSV}").Output()
if err != nil || csvName == "" {
e2e.Logf("Error: Not able to get csv from sub %v: %v %v", subscription.subName, csvName, err)
}
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(csvName).NotTo(o.BeEmpty())
csvVersion, err = oc.AsAdmin().Run("get").Args("csv", csvName, "-n", subscription.namespace, "-o=jsonpath={.spec.version}").Output()
if err != nil || csvVersion == "" {
e2e.Logf("Error: Not able to get version from csv %v: %v %v", csvName, csvVersion, err)
}
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(csvVersion).NotTo(o.BeEmpty())
cleanVer := strings.Split(testrun.operatorVer, "-")
if csvVersion != cleanVer[0] {
e2e.Logf("Error: expecting %v but CSV has %v", testrun.operatorVer, csvVersion)
}
o.Expect(csvVersion).To(o.Equal(cleanVer[0]))
})
g.It("Author:tbuskey-Medium-63122-Checking if cluster is ready for peer pods", func() {
// can't *VERIFY* all values but we can ensure the cm/secret variables were added by the users
if !kataconfig.enablePeerPods {
g.Skip("STEP Peer pods are not enabled with osc-config or OSCSENABLEPEERPODS")
}
var (
err error
msg string
errors = 0
errorList = []string{""}
)
// set the CLOUD_PROVIDER value from the peerpods configmap
cloudProvider, err := oc.AsAdmin().Run("get").Args("cm", ppConfigMapName, "-n", subscription.namespace, "-o=jsonpath={.data.CLOUD_PROVIDER}").Output()
if err != nil || strings.Contains(cloudProvider, "not found") {
e2e.Logf("STEP ERROR: peerpod configmap issue %v %v", cloudProvider, err)
o.Expect(err).NotTo(o.HaveOccurred())
}
if len(cloudProvider) == 0 {
e2e.Logf("STEP ERROR: CLOUD_PROVIDER is not set on peerpod config")
o.Expect(cloudProvider).ToNot(o.BeZero())
}
msg = fmt.Sprintf("checking %v ", ppSecretName)
g.By(msg)
msg, err = checkPeerPodSecrets(oc, subscription.namespace, cloudProvider, ppSecretName)
if err != nil {
e2e.Logf("%v", msg)
errors = errors + 1
errorList = append(errorList, msg)
}
msg = fmt.Sprintf("checking %v ", ppConfigMapName)
g.By(msg)
msg, err = checkPeerPodConfigMap(oc, subscription.namespace, cloudProvider, ppConfigMapName)
if err != nil {
e2e.Logf("%v", msg)
errors = errors + 1
errorList = append(errorList, msg)
}
g.By("Verify enablePeerPods is set in kataconfig")
msg, err = oc.AsAdmin().Run("get").Args("kataconfig", kataconfig.name, "-n", subscription.namespace, "-o=jsonpath={.spec.enablePeerPods}").Output()
if err != nil || msg != "true" {
e2e.Logf("STEP ERROR querying kataconfig %v and enablePeerPods setting", kataconfig.name)
errors = errors + 1
errorList = append(errorList, msg)
}
msg = fmt.Sprintf("check runtimeclass for %v", ppRuntimeClass)
g.By(msg)
msg, err = oc.AsAdmin().Run("get").Args("runtimeclass", "-n", subscription.namespace, "--no-headers").Output()
if err != nil || !strings.Contains(msg, ppRuntimeClass) {
e2e.Logf("STEP ERROR runtimeclass %v not found", ppRuntimeClass, msg, err)
errors = errors + 1
errorList = append(errorList, msg)
}
g.By("Check errors")
if errors != 0 {
e2e.Logf("STEP ERROR: %v error areas:\n %v", errors, errorList)
}
o.Expect(errors).To(o.BeZero())
g.By("SUCCESS - cluster has cm and secrets for peerpods")
})
g.It("Author:abhbaner-High-41566-High-41574-deploy & delete a pod with kata runtime", func() {
oc.SetupProject()
var (
msg string
err error
defaultPodName = "-example-41566"
podNs = oc.Namespace()
)
g.By("Deploying pod with kata runtime and verify it")
newPodName := createKataPod(oc, podNs, defaultPod, defaultPodName, kataconfig.runtimeClassName, testrun.workloadImage)
defer deleteKataResource(oc, "pod", podNs, newPodName)
msg, err = checkResourceJsonpath(oc, "pod", newPodName, podNs, "-o=jsonpath={.status.phase}", podRunState, podSnooze*time.Second, 10*time.Second)
if err != nil {
e2e.Logf("ERROR: pod %v could not be installed: %v %v", newPodName, msg, err)
o.Expect(err).NotTo(o.HaveOccurred())
}
g.By("SUCCESS - Pod with kata runtime installed")
})
// author: [email protected]
g.It("Author:tbuskey-High-43238-Operator prohibits creation of multiple kataconfigs", func() {
var (
kataConfigName2 = kataconfig.name + "2"
configFile string
msg string
err error
expectError = "KataConfig instance already exists, refusing to create a duplicate"
)
g.By("Create 2nd kataconfig file")
configFile, err = oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", kcTemplate, "-p", "NAME="+kataConfigName2, "-n", subscription.namespace).OutputToFile(getRandomString() + "kataconfig-common.json")
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("the file of resource is %s", configFile)
g.By("Apply 2nd kataconfig")
//Error from server (A KataConfig instance already exists, refusing to create a duplicate): error when creating "kataconfig2.yaml":
// admission webhook "vkataconfig.kb.io" denied the request: A KataConfig instance already exists, refusing to create a duplicate
msg, err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", configFile).Output()
o.Expect(err).To(o.HaveOccurred())
o.Expect(msg).To(o.ContainSubstring(expectError))
g.By("Success - cannot apply 2nd kataconfig")
})
g.It("Author:abhbaner-High-41263-Namespace check", func() {
g.By("Checking if ns 'openshift-sandboxed-containers-operator' exists")
msg, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("namespaces", subscription.namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(msg).To(o.ContainSubstring(subscription.namespace))
g.By("SUCCESS - Namespace check complete")
})
g.It("Author:abhbaner-High-43620-validate podmetrics for pod running kata", func() {
if kataconfig.enablePeerPods {
g.Skip("skipping. metrics are not available on pods with Peer Pods enabled")
}
oc.SetupProject()
var (
msg string
err error
defaultPodName = "example"
podNs = oc.Namespace()
)
g.By("Deploying pod with kata runtime and verify it")
newPodName := createKataPod(oc, podNs, defaultPod, defaultPodName, kataconfig.runtimeClassName, testrun.workloadImage)
defer deleteKataResource(oc, "pod", podNs, newPodName)
msg, err = checkResourceJsonpath(oc, "pod", newPodName, podNs, "-o=jsonpath={.status.phase}", podRunState, podSnooze*time.Second, 10*time.Second)
if err != nil {
e2e.Logf("ERROR: %v %v", msg, err)
}
errCheck := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
podMetrics, err := oc.AsAdmin().Run("describe").Args("podmetrics", newPodName, "-n", podNs).Output()
if err != nil {
return false, nil
}
e2e.Logf("Pod metrics output below \n %s ", podMetrics)
o.Expect(podMetrics).To(o.ContainSubstring("Cpu"))
o.Expect(podMetrics).To(o.ContainSubstring("Memory"))
o.Expect(podMetrics).To(o.ContainSubstring("Events"))
return true, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("can not describe podmetrics %v in ns %v", newPodName, podNs))
g.By("SUCCESS - Podmetrics for pod with kata runtime validated")
g.By("TEARDOWN - deleting the kata pod")
})
g.It("Author:abhbaner-High-43617-High-43616-CLI checks pod logs & fetching pods in podNs", func() {
if testrun.workloadToTest == "coco" {
g.Skip("Test not supported with coco")
}
oc.SetupProject()
var (
msg string
err error
defaultPodName = "example"
podNs = oc.Namespace()
)
g.By("Deploying pod with kata runtime and verify it")
newPodName := createKataPod(oc, podNs, defaultPod, defaultPodName, kataconfig.runtimeClassName, testrun.workloadImage)
defer deleteKataResource(oc, "pod", podNs, newPodName)
msg, err = checkResourceJsonpath(oc, "pod", newPodName, podNs, "-o=jsonpath={.status.phase}", podRunState, podSnooze*time.Second, 10*time.Second)
e2e.Logf("Pod (with Kata runtime) with name - %v , is installed: %v %v", newPodName, msg, err)
errCheck := wait.Poll(10*time.Second, 200*time.Second, func() (bool, error) {
podlogs, err := oc.AsAdmin().Run("logs").Args("pod/"+newPodName, "-n", podNs).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(podlogs).NotTo(o.BeEmpty())
if strings.Contains(podlogs, "serving on") {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Pod %v logs are not getting generated", newPodName))
g.By("SUCCESS - Logs for pods with kata validated")
g.By("TEARDOWN - deleting the kata pod")
})
g.It("Author:abhbaner-High-43514-kata pod displaying correct overhead", func() {
const (
defaultPodName = "example"
ppWebhookDeploymentName = "peer-pods-webhook"
ppVMExtendedResourceEnv = "POD_VM_EXTENDED_RESOURCE"
expPPVmExtendedResourceLimit = "1"
expPPVExtendedResourceRequest = "1"
)
oc.SetupProject()
podNs := oc.Namespace()
g.By("Deploying pod with kata runtime")
newPodName := createKataPod(oc, podNs, defaultPod, defaultPodName, kataconfig.runtimeClassName, testrun.workloadImage)
defer deleteKataResource(oc, "pod", podNs, newPodName)
g.By("Verifying pod state")
msg, err := checkResourceJsonpath(oc, "pod", newPodName, podNs, "-o=jsonpath={.status.phase}", podRunState, podSnooze*time.Second, 10*time.Second)
if err != nil {
e2e.Logf("ERROR: unable to get podState %v of %v in namespace %v %v %v", podRunState, newPodName, podNs, msg, err)
}
kataPodObj, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", newPodName, "-n", podNs, "-o=json").Output()
if err != nil {
e2e.Logf("ERROR: unable to get pod: %v in namepsace: %v - error: %v", newPodName, podNs, err)
}
o.Expect(err).NotTo(o.HaveOccurred())
// peerpod webhook erases the pod overhead
g.By("Checking peerpod resources")
if kataconfig.enablePeerPods {
g.By("Fetching peer POD_VM_EXTENDED_RESOURCE defaults from peer-pods-webhook pod")
ppVMResourceDefaults, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment", ppWebhookDeploymentName, "-n", subscription.namespace, "-o=jsonpath={.spec.template.spec.containers[?(@.name=='"+ppWebhookDeploymentName+"')].env[?(@.name=='"+ppVMExtendedResourceEnv+"')].value}").Output()
if err != nil {
e2e.Logf("ERROR: unable to get peerpod webhook deployment: %v in namepsace: %v - error: %v", ppWebhookDeploymentName, subscription.namespace, err)
}
o.Expect(err).ToNot(o.HaveOccurred())
gjson.Get(kataPodObj, "spec.containers").ForEach(func(key, container gjson.Result) bool {
e2e.Logf("checking container: %s on pod: %s in namespace: %s ", gjson.Get(container.String(), "name").String(), newPodName, podNs)
ppVMResourceDefaults := strings.Replace(ppVMResourceDefaults, ".", "\\.", -1)
actualResourceLimit := gjson.Get(container.String(), "resources.limits."+ppVMResourceDefaults).String()
if strings.Compare(actualResourceLimit, expPPVmExtendedResourceLimit) != 0 {
e2e.Logf("ERROR: peerpod: %v in namepsace: %v has incorrect pod VM extended resource limit: %v", newPodName, podNs, actualResourceLimit)
}
o.Expect(actualResourceLimit).To(o.Equal(expPPVmExtendedResourceLimit))
actualResourceRequest := gjson.Get(container.String(), "resources.requests."+ppVMResourceDefaults).String()
if strings.Compare(actualResourceRequest, expPPVExtendedResourceRequest) != 0 {
e2e.Logf("ERROR: peerpod: %v in namepsace: %v has incorrect pod VM extended resource request: %v", newPodName, podNs, actualResourceRequest)
}
o.Expect(actualResourceRequest).To(o.Equal(expPPVExtendedResourceRequest))
return true
})
}
g.By("Checking Kata pod overhead")
// for non-peer kata pods, overhead is expected to be same as set in runtimeclass
runtimeClassObj, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("runtimeclass", kataconfig.runtimeClassName, "-o=json").Output()
if err != nil {
e2e.Logf("ERROR: unable to get runtimeclass: %v - error: %v", kataconfig.runtimeClassName, err)
}
o.Expect(err).NotTo(o.HaveOccurred())
actualCpu := gjson.Get(kataPodObj, "spec.overhead.cpu").String()
expectedCpu := gjson.Get(runtimeClassObj, "overhead.podFixed.cpu").String()
if strings.Compare(expectedCpu, actualCpu) != 0 {
e2e.Logf("ERROR: kata pod: %v in namepsace: %v has incorrect cpu overhead: %v", newPodName, podNs, actualCpu)
}
o.Expect(expectedCpu).To(o.Equal(actualCpu))
actualMem := gjson.Get(kataPodObj, "spec.overhead.memory").String()
expectedMem := gjson.Get(runtimeClassObj, "overhead.podFixed.memory").String()
if strings.Compare(expectedMem, actualMem) != 0 {
e2e.Logf("ERROR: kata pod: %v in namepsace: %v has incorrect memory overhead: %v", newPodName, podNs, actualMem)
}
o.Expect(expectedMem).To(o.Equal(actualMem))
g.By("SUCCESS - kata pod overhead verified")
g.By("TEARDOWN - deleting the kata pod")
})
// author: [email protected]
g.It("Author:tbuskey-High-43619-oc admin top pod metrics works for pods that use kata runtime", func() {
if kataconfig.enablePeerPods {
g.Skip("skipping. metrics are not in oc admin top pod with Peer Pods enabled")
}
oc.SetupProject()
var (
podNs = oc.Namespace()
podName string
err error
msg string
waitErr error
metricCount = 0
)
g.By("Deploy a pod with kata runtime")
podName = createKataPod(oc, podNs, defaultPod, "admtop", kataconfig.runtimeClassName, testrun.workloadImage)
defer deleteKataResource(oc, "pod", podNs, podName)
msg, err = checkResourceJsonpath(oc, "pod", podName, podNs, "-o=jsonpath={.status.phase}", podRunState, podSnooze*time.Second, 10*time.Second)
g.By("Get oc top adm metrics for the pod")
snooze = 360
waitErr = wait.Poll(10*time.Second, snooze*time.Second, func() (bool, error) {
msg, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("top", "pod", "-n", podNs, podName, "--no-headers").Output()
if err == nil { // Will get error with msg: error: metrics not available yet
metricCount = len(strings.Fields(msg))
}
if metricCount == 3 {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(waitErr, "metrics never appeared")
if metricCount == 3 {
e2e.Logf("metrics for pod %v", msg)
}
o.Expect(metricCount).To(o.Equal(3))
g.By("Success")
})
g.It("Author:abhbaner-High-43516-operator is available in CatalogSource", func() {
g.By("Checking catalog source for the operator")
opMarketplace, err := oc.AsAdmin().Run("get").Args("packagemanifests", "-n", "openshift-marketplace").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(opMarketplace).NotTo(o.BeEmpty())
o.Expect(opMarketplace).To(o.ContainSubstring("sandboxed-containers-operator"))
o.Expect(opMarketplace).To(o.ContainSubstring("Red Hat Operators"))
g.By("SUCCESS - 'sandboxed-containers-operator' is present in packagemanifests")
})
g.It("Author:tbuskey-High-43523-Monitor deletion[Disruptive][Serial][Slow]", func() {
g.By("Delete kataconfig and verify it")
msg, err := deleteKataConfig(oc, kataconfig.name)
e2e.Logf("kataconfig %v was deleted\n--------- %v %v", kataconfig.name, msg, err)
g.By("SUCCESS")
})
g.It("Author:tbuskey-High-41813-Build Acceptance test with deletion[Disruptive][Serial][Slow]", func() {
g.Skip("kataconfig deletion steps are skipped")
//This test will install operator,kataconfig,pod with kata - delete pod, delete kataconfig
oc.SetupProject()
var (
msg string
err error
defaultPodName = "example"
podNs = oc.Namespace()
)
g.By("Deploying pod and verify it")
newPodName := createKataPod(oc, podNs, defaultPod, defaultPodName, kataconfig.runtimeClassName, testrun.workloadImage)
msg, err = checkResourceJsonpath(oc, "pod", newPodName, podNs, "-o=jsonpath={.status.phase}", podRunState, podSnooze*time.Second, 10*time.Second)
e2e.Logf("Pod (with Kata runtime) with name - %v , is installed: %v %v", newPodName, msg, err)
g.By("Deleting pod")
deleteKataResource(oc, "pod", podNs, newPodName)
g.By("Deleting kataconfig")
msg, err = deleteKataConfig(oc, kataconfig.name)
e2e.Logf("common kataconfig %v was deleted %v %v", kataconfig.name, msg, err)
g.By("SUCCESSS - build acceptance passed")
})
// author: [email protected]
g.It("Author:tbuskey-High-46235-Kata Metrics Verify that Namespace is labeled to enable monitoring", func() {
var (
err error
msg string
s string
label = ""
hasMetrics = false
)
g.By("Get labels of openshift-sandboxed-containers-operator namespace to check for monitoring")
msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("ns", "openshift-sandboxed-containers-operator", "-o=jsonpath={.metadata.labels}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
for _, s = range strings.SplitAfter(msg, ",") {
if strings.Contains(s, "openshift.io/cluster-monitoring") {
label = s
if strings.Contains(strings.SplitAfter(s, ":")[1], "true") {
hasMetrics = true
}
}
}
e2e.Logf("Label is %v", label)
o.Expect(hasMetrics).To(o.BeTrue())
g.By("Success")
})
g.It("Author:abhbaner-High-43524-Existing deployments (with runc) should restart normally after kata runtime install", func() {
oc.SetupProject()
var (
podNs = oc.Namespace()
deployName = "dep-43524-" + getRandomString()
msg string
podName string
newPodName string
)
g.By("Create deployment config from template")
configFile, err := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", defaultDeployment,
"-p", "NAME="+deployName, "-p", "RUNTIMECLASSNAME="+kataconfig.runtimeClassName,
"-p", "IMAGE="+testrun.workloadImage).OutputToFile(getRandomString() + "dep-common.json")
if err != nil {
e2e.Logf("Could not create configFile %v %v", configFile, err)
}
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Applying deployment file " + configFile)
msg, err = oc.AsAdmin().Run("apply").Args("-f", configFile, "-n", podNs).Output()
if err != nil {
e2e.Logf("Could not apply configFile %v %v", msg, err)
}
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Wait for deployment to be ready")
defer oc.AsAdmin().Run("delete").Args("deploy", "-n", podNs, deployName, "--ignore-not-found").Execute()
msg, err = waitForDeployment(oc, podNs, deployName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(msg).NotTo(o.BeEmpty())
// If the deployment is ready, pod will be. Might not need this
g.By("Wait for pods to be ready")
errCheck := wait.Poll(10*time.Second, 600*time.Second, func() (bool, error) {
msg, err = oc.AsAdmin().Run("get").Args("pods", "-n", podNs, "--no-headers").Output()
if !strings.Contains(msg, "No resources found") {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Timed out waiting for pods %v %v", msg, err))
g.By("Get pod name")
msg, err = oc.AsAdmin().Run("get").Args("pods", "-n", podNs, "--no-headers").Output()
podName = strings.Split(msg, " ")[0]
e2e.Logf("podname %v %v", msg, err)
msg = fmt.Sprintf("Deleting pod %v from deployment", podName)
g.By(msg)
msg, err = deleteResource(oc, "pod", podName, podNs, podSnooze*time.Second, 10*time.Second)
e2e.Logf("%v pod deleted: %v %v", podName, msg, err)
g.By("Wait for deployment to re-replicate")
msg, err = waitForDeployment(oc, podNs, deployName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(msg).NotTo(o.BeEmpty())
g.By("Get new pod name")
msg, err = oc.AsAdmin().Run("get").Args("pods", "-n", podNs, "--no-headers").Output()
newPodName = strings.Split(msg, " ")[0]
e2e.Logf("new podname %v %v", msg, err)
if newPodName == podName {
e2e.Failf("A new pod did not get created")
}
g.By("SUCCESSS - kataconfig installed and post that pod with runc successfully restarted ")
msg, err = deleteResource(oc, "deploy", deployName, podNs, podSnooze*time.Second, 10*time.Second)
})
// author: [email protected]
g.It("Longduration-NonPreRelease-Author:tbuskey-High-42167-Must-gather collects sandboxed operator logs[Serial]", func() {
g.Skip("mustgather test must be done manually")
type counts struct {
audits int
crio int
qemuLogs int
qemuVersion int
describeCsv int
describeKc int
describeServices int
describeSub int
describeVwebhook int
}
oc.SetupProject()
var (
deployConfigFile = ""
deployName = "mg-42167-" + getRandomString()
deploymentFile = getRandomString() + "dep-common.json"
podNs = oc.Namespace()
err error
fails = 0
kcLogLevel = "{\"spec\":{\"logLevel\":\"debug\"}}"
logFile string
mustgatherFiles = []string{""}
mustgatherName = "mustgather" + getRandomString()
mustgatherDir = "/tmp/" + mustgatherName
mustgatherLog = mustgatherName + ".log"
msg string
nodeControlCount int
nodeWorkerCount int
singleNode = false
isWorker = false
)
mustgatherChecks := counts{
audits: 0,
crio: 0,
qemuLogs: 0,
qemuVersion: 0,
describeCsv: 0,
describeKc: 0,
describeServices: 0,
describeSub: 0,
describeVwebhook: 0,
}
nodeControlList, err := exutil.GetClusterNodesBy(oc, "master")
msgIfErr := fmt.Sprintf("getClusterNodesBy master %v %v", nodeControlList, err)
o.Expect(err).NotTo(o.HaveOccurred(), msgIfErr)
nodeControlCount = len(nodeControlList)
nodeWorkerList, err := exutil.GetClusterNodesBy(oc, "worker")
msgIfErr = fmt.Sprintf("getClusterNodesBy worker %v %v", nodeWorkerList, err)
o.Expect(err).NotTo(o.HaveOccurred(), msgIfErr)
nodeWorkerCount = len(nodeWorkerList)
mustgatherExpected := counts{
audits: 1,
crio: nodeWorkerCount + nodeControlCount,
qemuLogs: nodeWorkerCount, // Need to change from deployment
qemuVersion: nodeWorkerCount,
describeCsv: 1,
describeKc: 1,
describeServices: 1,
describeSub: 1,
describeVwebhook: 1,
}
// for SNO
if nodeWorkerCount == 1 && !strings.Contains(nodeWorkerList[0], "worker") {
singleNode = true
mustgatherExpected.crio = nodeWorkerCount
}
// patch kataconfig for debug
_, _ = oc.AsAdmin().Run("patch").Args("kataconfig", kataconfig.name, "-n", subscription.namespace, "--type", "merge", "--patch", kcLogLevel).Output()
msg, err = waitForNodesInDebug(oc, subscription.namespace)
e2e.Logf("%v", msg)
/* Create a deployment file from template with N replicas where N=worker nodes
It does not ensure that there is a replica on each worker node.
Loop because on 4.12 SNO, nodes might not respond at 1st
error: unable to process template
service unavailable
exit status 1 */
errCheck := wait.Poll(10*time.Second, 360*time.Second, func() (bool, error) {
deployConfigFile, err = oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", defaultDeployment,
"-p", "NAME="+deployName, "-p", "NAMESPACE="+podNs, "-p", "REPLICAS="+fmt.Sprintf("%v", nodeWorkerCount),
"-p", "RUNTIMECLASSNAME="+kataconfig.runtimeClassName, "-p", "IMAGE="+testrun.workloadImage).OutputToFile(deploymentFile)
if strings.Contains(deployConfigFile, deploymentFile) {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Error: Unable to create deployment file from template: %v %v", deployConfigFile, err))
o.Expect(deployConfigFile).NotTo(o.BeEmpty(), "empty deploy file error %v", err)
_, err = oc.AsAdmin().Run("apply").Args("-f", deployConfigFile, "-n", podNs).Output()
defer oc.AsAdmin().Run("delete").Args("deploy", "-n", podNs, deployName, "--ignore-not-found").Execute()
msg, err = waitForDeployment(oc, podNs, deployName)
msgIfErr = fmt.Sprintf("ERROR: waitForDeployment %v: %v %v", deployName, msg, err)
o.Expect(err).NotTo(o.HaveOccurred(), msgIfErr)
o.Expect(msg).NotTo(o.BeEmpty(), msgIfErr)
defer os.RemoveAll(mustgatherDir)
logFile, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("-n", subscription.namespace, "must-gather", "--image="+testrun.mustgatherImage, "--dest-dir="+mustgatherDir).OutputToFile(mustgatherLog)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("ERROR: mustgather %v has an error %v %v", mustgatherLog, logFile, err))
files, err := os.ReadDir(mustgatherDir)
msgIfErr = fmt.Sprintf("ERROR %v contents %v\n%v", mustgatherDir, err, files)
o.Expect(err).NotTo(o.HaveOccurred(), msgIfErr)
o.Expect(files).NotTo(o.BeEmpty(), msgIfErr)
err = filepath.Walk(mustgatherDir, func(path string, info os.FileInfo, err error) error {
if err != nil {
e2e.Logf("Error on %v: %v", path, err)
return err
}
isWorker = false
for _, worker := range nodeWorkerList {
if strings.Contains(path, worker) {
isWorker = true
break
}
}
if info.IsDir() { // qemu will create a directory but might not create files
if isWorker == true && strings.Contains(path, "/run/vc/crio/fifo") && !strings.Contains(path, "/run/vc/crio/fifo/io") {
mustgatherChecks.qemuLogs++
}
}
if !info.IsDir() {
mustgatherFiles = append(mustgatherFiles, path)
if strings.Contains(path, "audit.log") {
mustgatherChecks.audits++
}
if strings.Contains(path, "/nodes/") {
if strings.Contains(path, "_logs_crio") {
mustgatherChecks.crio++
}
// in SNO, no worker, just master
if (isWorker == true || (singleNode == true && isWorker != true)) && strings.Contains(path, "/version") {
mustgatherChecks.qemuVersion++
}
}
if strings.Contains(path, "/sandboxed-containers") {
if strings.Contains(path, "/clusterserviceversion_description") {
mustgatherChecks.describeCsv++
}
if strings.Contains(path, "/kataconfig_description") {
mustgatherChecks.describeKc++
}
if strings.Contains(path, "/services_description") {
mustgatherChecks.describeServices++
}
if strings.Contains(path, "/subscription_description") {
mustgatherChecks.describeSub++
}
if strings.Contains(path, "/validatingwebhookconfigurations_description") {
mustgatherChecks.describeVwebhook++
}
}
}
return nil
})
e2e.Logf("%v files in must-gather dir %v", len(mustgatherFiles), mustgatherDir)
e2e.Logf("expected: %v", mustgatherExpected)
e2e.Logf("actual : %v", mustgatherChecks)
e2e.Logf("mustgatherChecks.audits : %v", mustgatherChecks.audits)
if mustgatherChecks.audits < mustgatherExpected.audits {
e2e.Logf("Audit logs (%v) not found on any worker nodes (%v)", mustgatherChecks.audits, mustgatherExpected.audits)
fails++
}
e2e.Logf("mustgatherChecks.crio : %v", mustgatherChecks.crio)
if mustgatherChecks.crio != (mustgatherExpected.crio) {
e2e.Logf("crio logs (%v) did exist on all nodes (%v)", mustgatherChecks.crio, (mustgatherExpected.crio))
fails++
}
// A deployment will place VMs based on loads
// to ensure a VM is on each node another method is needed
e2e.Logf("mustgatherChecks.qemuLogs : %v", mustgatherChecks.qemuLogs)
if mustgatherChecks.qemuLogs != mustgatherExpected.qemuLogs {
e2e.Logf("qemu log directory (%v) does not exist on all worker nodes (%v), is ok", mustgatherChecks.qemuLogs, mustgatherExpected.qemuLogs)
// VMs should be 1 on each worker node but k8s might put 2 on a node & 0 on another per node load
if !singleNode && mustgatherChecks.qemuLogs < 1 { // because deployment is used
fails++
}
}
e2e.Logf("mustgatherChecks.qemuVersion : %v", mustgatherChecks.qemuVersion)
if mustgatherChecks.qemuVersion != mustgatherExpected.qemuVersion {
e2e.Logf("rpm version log (%v) did not exist on worker nodes (%v)", mustgatherChecks.qemuVersion, mustgatherExpected.qemuVersion)
fails++
}
e2e.Logf("mustgatherChecks.describeCsv : %v", mustgatherChecks.describeCsv)
if mustgatherChecks.describeCsv != mustgatherExpected.describeCsv {
e2e.Logf("describeCsv (%v) did not exist", mustgatherChecks.describeCsv)
fails++
}
e2e.Logf("mustgatherChecks.describeKc : %v", mustgatherChecks.describeKc)
if mustgatherChecks.describeKc != mustgatherExpected.describeKc {
e2e.Logf("describeKc (%v) did not exist", mustgatherChecks.describeKc)
fails++
}
e2e.Logf("mustgatherChecks.describeServices : %v", mustgatherChecks.describeServices)
if mustgatherChecks.describeServices != mustgatherExpected.describeServices {
e2e.Logf("describeServices (%v) did not exist", mustgatherChecks.describeServices)
fails++
}
e2e.Logf("mustgatherChecks.describeSub : %v", mustgatherChecks.describeSub)
if mustgatherChecks.describeSub != mustgatherExpected.describeSub {
e2e.Logf("describeSub (%v) did not exist", mustgatherChecks.describeSub)
fails++
}
e2e.Logf("mustgatherChecks.describeVwebhook : %v", mustgatherChecks.describeVwebhook)
if mustgatherChecks.describeVwebhook != mustgatherExpected.describeVwebhook {
e2e.Logf("describeVwebhook (%v) did not exist", mustgatherChecks.describeVwebhook)
fails++
}
o.Expect(fails).To(o.Equal(0), fmt.Sprintf("%v logs did not match expected results\n%v", fails, mustgatherExpected))
e2e.Logf("STEP: SUCCESS")
})
// author: [email protected]
g.It("Longduration-Author:tbuskey-High-53583-upgrade osc operator by changing subscription [Disruptive][Serial]", func() {
g.Skip("Upgrade tests should be manually done")
var (
subscriptionUpgrade = subscription
kataconfigUpgrade = kataconfig
testrunUpgradeWithSubscription = testrun
testrunConfigmapName = "osc-config-upgrade-subscription"
msg string
msgIfErr string
)
testrunUpgradeWithSubscription.checked = false
upgradeConfigMapExists, err := getTestRunParameters(oc, &subscriptionUpgrade, &kataconfigUpgrade, &testrunUpgradeWithSubscription, testrunConfigmapNs, testrunConfigmapName)
if err != nil {
e2e.Failf("ERROR: testrunUpgradeWithSubscription configmap %v errors: %v\n%v", testrunUpgradeWithSubscription, err)
}
if !upgradeConfigMapExists {
msg = fmt.Sprintf("SKIP: %v configmap does not exist. Cannot upgrade by changing subscription", testrunConfigmapName)
g.Skip(msg)
}
if testrunUpgradeWithSubscription.redirectNeeded {
if ocpMajorVer == "4" && minorVer <= 12 {
redirectType = "ImageContentSourcePolicy"
redirectFile = filepath.Join(testDataDir, "ImageContentSourcePolicy-brew.yaml")
}
err = applyImageRedirect(oc, redirectFile, redirectType, redirectName)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("%v", err))
}
if testrunUpgradeWithSubscription.catalogSourceName != subscription.catalogSourceName {
waitForCatalogReadyOrFail(oc, testrunUpgradeWithSubscription.catalogSourceName)
g.By("Check catalog for " + subscriptionUpgrade.subName)
label := fmt.Sprintf("catalog=%v", testrunUpgradeWithSubscription.catalogSourceName)
errCheck := wait.Poll(10*time.Second, 240*time.Second, func() (bool, error) {
msg, err = oc.AsAdmin().Run("get").Args("packagemanifest", "-l", label, "-n", subscriptionUpgrade.catalogSourceNamespace).Output()
if strings.Contains(msg, subscriptionUpgrade.subName) {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("%v is not in the %v catalog. Cannot change subscription: %v %v", subscriptionUpgrade.subName, testrunUpgradeWithSubscription.catalogSourceName, msg, err))
msg, err = changeSubscriptionCatalog(oc, subscriptionUpgrade, testrunUpgradeWithSubscription)
msgIfErr = fmt.Sprintf("ERROR: patching the subscription catalog %v failed %v %v", subscriptionUpgrade, msg, err)
o.Expect(err).NotTo(o.HaveOccurred(), msgIfErr)
o.Expect(msg).NotTo(o.BeEmpty(), msgIfErr)
msg, err = subscriptionIsFinished(oc, subscriptionUpgrade)
msgIfErr = fmt.Sprintf("ERROR: subscription wait for catalog patch %v failed %v %v", subscriptionUpgrade, msg, err)
o.Expect(err).NotTo(o.HaveOccurred(), msgIfErr)
o.Expect(msg).NotTo(o.BeEmpty(), msgIfErr)
}
if testrunUpgradeWithSubscription.channel != subscription.channel {
g.By("Changing the subscription channel")
msg, err = changeSubscriptionChannel(oc, subscriptionUpgrade, testrunUpgradeWithSubscription)
msgIfErr = fmt.Sprintf("ERROR: patching the subscription channel %v: %v %v", subscriptionUpgrade, msg, err)
o.Expect(err).NotTo(o.HaveOccurred(), msgIfErr)
o.Expect(msg).NotTo(o.BeEmpty(), msgIfErr)
// all pods restart & subscription gets recreated
msg, err = subscriptionIsFinished(oc, subscriptionUpgrade)
msgIfErr = fmt.Sprintf("ERROR: subscription wait after channel changed %v: %v %v", subscriptionUpgrade, msg, err)
o.Expect(err).NotTo(o.HaveOccurred(), msgIfErr)
o.Expect(msg).NotTo(o.BeEmpty(), msgIfErr)
}
})
g.It("Author:vvoronko-High-60231-Scale-up deployment [Serial]", func() {
oc.SetupProject()
var (
podNs = oc.Namespace()
deployName = "dep-60231-" + getRandomString()
initReplicas = 3
maxReplicas = 6
numOfVMs int
msg string
)
kataNodes := exutil.GetNodeListByLabel(oc, kataocLabel)
o.Expect(len(kataNodes) > 0).To(o.BeTrue(), fmt.Sprintf("kata nodes list is empty %v", kataNodes))
if !kataconfig.enablePeerPods {
g.By("Verify no instaces exists before the test")
numOfVMs = getTotalInstancesOnNodes(oc, opNamespace, kataNodes)
//TO DO wait for some time to enable disposal of previous test instances
o.Expect(numOfVMs).To(o.Equal(0), fmt.Sprintf("initial number of VM instances should be zero"))
}
g.By("Create deployment config from template")
configFile, err := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", defaultDeployment,
"-p", "NAME="+deployName, "-p", "REPLICAS="+strconv.Itoa(initReplicas),
"-p", "RUNTIMECLASSNAME="+kataconfig.runtimeClassName, "-p", "IMAGE="+testrun.workloadImage).OutputToFile(getRandomString() + "dep-common.json")
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Could not create deployment configFile %v", configFile))
g.By("Applying deployment file " + configFile)
msg, err = oc.AsAdmin().Run("apply").Args("-f", configFile, "-n", podNs).Output()
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Could not apply configFile %v", msg))
g.By("Wait for deployment to be ready")
defer oc.AsAdmin().Run("delete").Args("deploy", "-n", podNs, deployName, "--ignore-not-found").Execute()
msg, err = waitForDeployment(oc, podNs, deployName)
o.Expect(err).NotTo(o.HaveOccurred())
errReplicasMsg := fmt.Sprintf("Deployment %v number of ready replicas don't match requested", deployName)
o.Expect(msg).To(o.Equal(strconv.Itoa(initReplicas)), errReplicasMsg)
if !kataconfig.enablePeerPods {
g.By("Verifying actual number of VM instances")
numOfVMs = getTotalInstancesOnNodes(oc, opNamespace, kataNodes)
o.Expect(numOfVMs).To(o.Equal(initReplicas), "actual number of VM instances doesn't match")
}
g.By(fmt.Sprintf("Scaling deployment from %v to %v", initReplicas, maxReplicas))
err = oc.AsAdmin().Run("scale").Args("deployment", deployName, "--replicas="+strconv.Itoa(maxReplicas), "-n", podNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Could not Scale deployment %v", msg))
msg, err = waitForDeployment(oc, podNs, deployName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(msg).To(o.Equal(strconv.Itoa(maxReplicas)), errReplicasMsg)
if !kataconfig.enablePeerPods {
numOfVMs = getTotalInstancesOnNodes(oc, opNamespace, kataNodes)
o.Expect(numOfVMs).To(o.Equal(maxReplicas), "actual number of VM instances doesn't match")
}
g.By("SUCCESSS - deployment scale-up finished successfully")
})
g.It("Author:vvoronko-High-60233-Scale-down deployment [Serial]", func() {
oc.SetupProject()
var (
podNs = oc.Namespace()
deployName = "dep-60233-" + getRandomString()
initReplicas = 6
updReplicas = 3
numOfVMs int
msg string
)
kataNodes := exutil.GetNodeListByLabel(oc, kataocLabel)
o.Expect(len(kataNodes) > 0).To(o.BeTrue(), fmt.Sprintf("kata nodes list is empty %v", kataNodes))
if !kataconfig.enablePeerPods {
g.By("Verify no instaces exists before the test")
numOfVMs = getTotalInstancesOnNodes(oc, opNamespace, kataNodes)
//TO DO wait for some time to enable disposal of previous test instances
o.Expect(numOfVMs).To(o.Equal(0), fmt.Sprintf("initial number of VM instances should be zero"))
}
g.By("Create deployment config from template")
configFile, err := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", defaultDeployment,
"-p", "NAME="+deployName, "-p", "REPLICAS="+strconv.Itoa(initReplicas), "-p", "RUNTIMECLASSNAME="+kataconfig.runtimeClassName,
"-p", "IMAGE="+testrun.workloadImage).OutputToFile(getRandomString() + "dep-common.json")
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Could not create deployment configFile %v", configFile))
g.By("Applying deployment file " + configFile)
msg, err = oc.AsAdmin().Run("apply").Args("-f", configFile, "-n", podNs).Output()
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Could not apply configFile %v", msg))
g.By("Wait for deployment to be ready")
defer oc.AsAdmin().Run("delete").Args("deploy", "-n", podNs, deployName, "--ignore-not-found").Execute()
msg, err = waitForDeployment(oc, podNs, deployName)
o.Expect(err).NotTo(o.HaveOccurred())
errReplicasMsg := fmt.Sprintf("Deployment %v number of ready replicas don't match requested", deployName)
o.Expect(msg).To(o.Equal(strconv.Itoa(initReplicas)), errReplicasMsg)
if !kataconfig.enablePeerPods {
g.By("Verifying actual number of VM instances")
numOfVMs = getTotalInstancesOnNodes(oc, opNamespace, kataNodes)
o.Expect(numOfVMs).To(o.Equal(initReplicas), "actual number of VM instances doesn't match")
}
g.By(fmt.Sprintf("Scaling deployment from %v to %v", initReplicas, updReplicas))
err = oc.AsAdmin().Run("scale").Args("deployment", deployName, "--replicas="+strconv.Itoa(updReplicas), "-n", podNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Could not Scale deployment %v", msg))
msg, err = waitForDeployment(oc, podNs, deployName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(msg).To(o.Equal(strconv.Itoa(updReplicas)), errReplicasMsg)
if !kataconfig.enablePeerPods {
numOfVMs = getTotalInstancesOnNodes(oc, opNamespace, kataNodes)
o.Expect(numOfVMs).To(o.Equal(updReplicas), "actual number of VM instances doesn't match")
}
g.By("SUCCESSS - deployment scale-down finished successfully")
})
g.It("Author:vvoronko-High-64043-expose-serice deployment", func() {
oc.SetupProject()
var (
podNs = oc.Namespace()
deployName = "dep-64043-" + getRandomString()
msg string
statusCode = 200
testPageBody = "Hello OpenShift!"
ocpHelloImage = "quay.io/openshifttest/hello-openshift:1.2.0" // should this be testrun.workloadImage?
)
g.By("Create deployment config from template")
configFile, err := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", defaultDeployment,
"-p", "NAME="+deployName, "-p", "IMAGE="+ocpHelloImage,
"-p", "RUNTIMECLASSNAME="+kataconfig.runtimeClassName).OutputToFile(getRandomString() + "dep-common.json")
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Could not create configFile %v", configFile))
g.By("Applying deployment file " + configFile)
msg, err = oc.AsAdmin().Run("apply").Args("-f", configFile, "-n", podNs).Output()
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Could not apply configFile %v", msg))
g.By("Wait for deployment to be ready")
defer oc.AsAdmin().Run("delete").Args("deploy", "-n", podNs, deployName, "--ignore-not-found").Execute()
msg, err = waitForDeployment(oc, podNs, deployName)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Deployment %v didn't reached expected state: %v", deployName, msg))
g.By("Expose deployment and its service")
defer deleteRouteAndService(oc, deployName, podNs)
host, err := createServiceAndRoute(oc, deployName, podNs)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("route host=%v", host)
g.By("send request via the route")
strURL := "http://" + host
resp, err := getHttpResponse(strURL, statusCode)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("send request via the route %v failed with: %v", strURL, err))
o.Expect(resp).To(o.ContainSubstring(testPageBody), "response body doesn't contain the expected content")
g.By("SUCCESSS - deployment Expose service finished successfully")
})
g.It("Author:vvoronko-High-63121-Peerpods-cluster-limit [Serial]", func() {
//TODO edge case: verify that no leftover podvms are still running, otherwise other tests will fail
if !kataconfig.enablePeerPods {
g.Skip("63121 podvm limit test is only for peer pods")
}
oc.SetupProject()
var (
podNs = oc.Namespace()
deployName = "dep-63121-" + getRandomString()
podIntLimit = 2
defaultLimit = "10"
kataNodesAmount = len(exutil.GetNodeListByLabel(oc, kataocLabel))
msg string
cleanupRequired = true
)
defer func() {
if cleanupRequired {
e2e.Logf("Cleanup required, restoring to default %v", defaultLimit)
patchPeerPodLimit(oc, opNamespace, defaultLimit)
}
}()
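// Lower the per-node peer-pod VM limit to podIntLimit, then create podIntLimit*kataNodesAmount replicas so every kata node is saturated.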
patchPeerPodLimit(oc, opNamespace, strconv.Itoa(podIntLimit))
g.By("Create deployment config from template")
initReplicas := strconv.Itoa(podIntLimit * kataNodesAmount)
configFile, err := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", defaultDeployment,
"-p", "NAME="+deployName, "-p", "REPLICAS="+initReplicas,
"-p", "RUNTIMECLASSNAME="+kataconfig.runtimeClassName, "-p", "IMAGE="+testrun.workloadImage).OutputToFile(getRandomString() + "dep-common.json")
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Could not create deployment configFile %v", configFile))
g.By("Applying deployment file " + configFile)
msg, err = oc.AsAdmin().Run("apply").Args("-f", configFile, "-n", podNs).Output()
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Could not apply configFile %v", msg))
defer deleteKataResource(oc, "deploy", podNs, deployName)
g.By("Wait for deployment to be ready")
msg, err = waitForDeployment(oc, podNs, deployName)
e2e.Logf("Deployment has initially %v pods", msg)
o.Expect(err).NotTo(o.HaveOccurred())
errReplicasMsg := fmt.Sprintf("Deployment %v number of ready replicas don't match requested", deployName)
o.Expect(msg).To(o.Equal(initReplicas), errReplicasMsg)
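// Scale up by one extra replica per kata node beyond the limit; the extra pods should stay pending (no podvm created) until the limit is restored.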
extraReplicas := strconv.Itoa((podIntLimit + 1) * kataNodesAmount)
g.By(fmt.Sprintf("Scaling deployment from %v to %v", initReplicas, extraReplicas))
msg, err = oc.AsAdmin().Run("scale").Args("deployment", deployName, "--replicas="+extraReplicas, "-n", podNs).Output()
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Could not Scale deployment %v", msg))
extraPods := strconv.Itoa(kataNodesAmount)
g.By("Wait for 30sec to check deployment has " + extraPods + " pending pods w/o corresponding podvm, because of the limit")
errCheck := wait.Poll(30*time.Second, snooze*time.Second, func() (bool, error) {
msg, err = oc.AsAdmin().Run("get").Args("deploy", "-n", podNs, deployName, "-o=jsonpath={.status.unavailableReplicas}").Output()
if msg == extraPods {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Timed out waiting for %v additional pending pods %v %v", extraPods, msg, err))
msg, err = oc.AsAdmin().Run("get").Args("deploy", "-n", podNs, deployName, "-o=jsonpath={.status.readyReplicas}").Output()
o.Expect(msg).To(o.Equal(initReplicas), errReplicasMsg)
g.By("restore podvm limit")
patchPeerPodLimit(oc, opNamespace, defaultLimit)
cleanupRequired = false
msg, err = waitForDeployment(oc, podNs, deployName)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Deployment has %v running pods after patching the limit", msg)
o.Expect(msg).To(o.Equal(extraReplicas), errReplicasMsg)
g.By("SUCCESSS - deployment peer pods podvm limit - finished successfully")
})
g.It("Author:vvoronko-High-57339-Eligibility", func() {
if !kataconfig.eligibility {
g.Skip("57339-Eligibility test is only for eligibility=true in kataconfig")
}
oc.SetupProject()
kataNodes := exutil.GetNodeListByLabel(oc, kataocLabel)
o.Expect(len(kataNodes) > 0).To(o.BeTrue(), fmt.Sprintf("kata nodes list is empty %v", kataNodes))
eligibleNodes := exutil.GetNodeListByLabel(oc, featureLabel)
o.Expect(len(eligibleNodes) == len(kataNodes)).To(o.BeTrue(), "kata node list length differs from the list of eligible nodes")
for _, node := range kataNodes {
found, _ := exutil.StringsSliceContains(eligibleNodes, node)
o.Expect(found).To(o.BeTrue(), fmt.Sprintf("node %v is not in the list of eligible nodes %v", node, eligibleNodes))
}
})
g.It("Author:vvoronko-High-67650-pod-with-filesystem", func() {
oc.SetupProject()
var (
podNs = oc.Namespace()
pvcName = "pvc-67650-" + getRandomString()
capacity = "2"
)
err := createRWOfilePVC(oc, podNs, pvcName, capacity)
defer oc.WithoutNamespace().AsAdmin().Run("delete").Args("pvc", pvcName, "-n", podNs, "--ignore-not-found").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
//some platforms provision automatically while others wait for the first consumer, leaving the PVC in "Pending" status
//_, err = checkResourceJsonpath(oc, "pvc", pvcName, podNs, "-o=jsonpath={.status.phase}", "Bound", 30*time.Second, 5*time.Second)
//TODO: add a function that takes any pod and injects the storage definition into it
// run pod with kata
//TODO: test IO
})
g.It("Author:tbuskey-High-66554-Check and verify control plane pods and other components", func() {
var (
duration time.Duration = 300
interval time.Duration = 10
)
testControlPod := func(resType, resName, desiredCountJsonPath, actualCountJsonPath, podLabel string) {
// Check the resource Type for desired count by looking at the jsonpath
// Check the actual count at this jsonpath
// Wait until the actual count == desired count then set expectedPods to the actual count
// Verify count of "Running" pods with podLabel matches expectedPods
expectedPods, msg, err := checkResourceJsonpathMatch(oc, resType, resName, subscription.namespace, desiredCountJsonPath, actualCountJsonPath)
if err != nil || msg == "" {
e2e.Logf("%v does not match %v in %v %v %v %v", desiredCountJsonPath, actualCountJsonPath, resName, resType, msg, err)
}
o.Expect(err).NotTo(o.HaveOccurred())
msg, err = checkLabeledPodsExpectedRunning(oc, subscription.namespace, podLabel, expectedPods)
if err != nil || msg == "" {
e2e.Logf("Could not find pods labeled %v %v %v", podLabel, msg, err)
}
o.Expect(err).NotTo(o.HaveOccurred())
}
testControlPod("deployment", "controller-manager", "-o=jsonpath={.spec.replicas}", "-o=jsonpath={.status.readyReplicas}", "control-plane=controller-manager")
testControlPod("daemonset", "openshift-sandboxed-containers-monitor", "-o=jsonpath={.status.desiredNumberScheduled}", "-o=jsonpath={.status.numberReady}", "name=openshift-sandboxed-containers-monitor")
if kataconfig.enablePeerPods {
testControlPod("deployment", "peer-pods-webhook", "-o=jsonpath={.spec.replicas}", "-o=jsonpath={.status.readyReplicas}", "app=peer-pods-webhook")
testControlPod("daemonset", "peerpodconfig-ctrl-caa-daemon", "-o=jsonpath={.status.desiredNumberScheduled}", "-o=jsonpath={.status.numberReady}", "name=peerpodconfig-ctrl-caa-daemon")
// Check for the peer pod RuntimeClass
msg, err := checkResourceExists(oc, "RuntimeClass", ppRuntimeClass, subscription.namespace, duration, interval)
if err != nil || msg == "" {
e2e.Logf("Could not find %v in RuntimeClass %v %v", ppRuntimeClass, msg, err)
}
// and kata RuntimeClass
msg, err = checkResourceExists(oc, "RuntimeClass", "kata", subscription.namespace, duration, interval)
if err != nil || msg == "" {
e2e.Logf("Could not find kata in RuntimeClass %v %v", msg, err)
}
}
})
g.It("Author:tbuskey-High-68945-Check FIPS on pods", func() {
if !clusterHasEnabledFIPS(oc, subscription.namespace) {
g.Skip("The cluster does not have FIPS enabled")
}
oc.SetupProject()
podNamespace := oc.Namespace()
podName := createKataPod(oc, podNamespace, defaultPod, "pod68945", kataconfig.runtimeClassName, testrun.workloadImage)
defer deleteKataResource(oc, "pod", podNamespace, podName)
msg, err := checkResourceJsonpath(oc, "pod", podName, podNamespace, "-o=jsonpath={.status.phase}", podRunState, podSnooze*time.Second, 10*time.Second)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("ERROR: pod %v could not be created: %v %v", podName, msg, err))
msgIfErr := "ERROR: The cluster is in FIPS but pods are not"
// check that the pod(vm) booted with fips
podCmdline, podCmdlineErr := oc.AsAdmin().Run("rsh").Args("-T", "-n", podNamespace, podName, "cat", "/proc/cmdline").Output()
if podCmdlineErr != nil || !strings.Contains(podCmdline, "fips=1") {
msgIfErr = fmt.Sprintf("%v\nERROR: %v did not boot with fips enabled:%v %v", msgIfErr, podName, podCmdline, podCmdlineErr)
}
// check that pod(vm) has fips enabled
podFipsEnabled, podFipsEnabledErr := oc.AsAdmin().Run("rsh").Args("-T", "-n", podNamespace, podName, "cat", "/proc/sys/crypto/fips_enabled").Output()
if podFipsEnabledErr != nil || podFipsEnabled != "1" {
msgIfErr = fmt.Sprintf("%v\nERROR: %v does not have fips_enabled: %v %v", msgIfErr, podName, podFipsEnabled, podFipsEnabledErr)
}
// fail with all possible debugging logs included
o.Expect(podCmdlineErr).NotTo(o.HaveOccurred(), msgIfErr)
o.Expect(podCmdline).To(o.ContainSubstring("fips=1"), msgIfErr)
o.Expect(podFipsEnabledErr).NotTo(o.HaveOccurred(), msgIfErr)
o.Expect(podFipsEnabled).To(o.Equal("1"), msgIfErr)
})
g.It("Author:vvoronko-High-68930-deploy peerpod with type annotation", func() {
if testrun.workloadToTest == "coco" {
g.Skip("Test not supported with coco")
}
oc.SetupProject()
var (
basePodName = "-example-68930"
podNs = oc.Namespace()
annotations = map[string]string{
"MEMORY": "256",
"CPU": "0",
"INSTANCESIZE": "",
}
instanceSize = map[string]string{
"aws": "t3.xlarge",
"azure": "Standard_D4as_v5",
}
)
provider := getCloudProvider(oc)
val, ok := instanceSize[provider]
if !(kataconfig.enablePeerPods && ok) {
g.Skip("68930-deploy peerpod with type annotation supported only for kata-remote on AWS and AZURE")
}
annotations["INSTANCESIZE"] = val
g.By("Deploying pod with kata runtime and verify it")
podName, err := createKataPodAnnotated(oc, podNs, podAnnotatedTemplate, basePodName, kataconfig.runtimeClassName, testrun.workloadImage, annotations)
defer deleteKataResource(oc, "pod", podNs, podName)
o.Expect(err).NotTo(o.HaveOccurred())
actualSize, err := getPeerPodMetadataInstanceType(oc, podNs, podName, provider)
e2e.Logf("Podvm with required instance type %v was launched as %v", instanceSize[provider], actualSize)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Failed rsh to pod %v to provide metadata: %v", podName, err))
o.Expect(actualSize).To(o.Equal(instanceSize[provider]), fmt.Sprintf("Instance size doesn't match provided annotations: %v", err))
g.By("SUCCESS - Podvm with required instance type was launched")
})
g.It("Author:vvoronko-High-69018-deploy peerpod with default vcpu and memory", func() {
if testrun.workloadToTest == "coco" {
g.Skip("Test not supported with coco")
}
oc.SetupProject()
var (
basePodName = "-example-69018"
podNs = oc.Namespace()
annotations = map[string]string{
"MEMORY": "6000",
"CPU": "2",
"INSTANCESIZE": "",
}
instanceSize = map[string]string{
"aws": "t3.large",
"azure": "Standard_D2as_v5",
}
)
provider := getCloudProvider(oc)
val, ok := instanceSize[provider]
if !(kataconfig.enablePeerPods && ok) {
g.Skip("69018-deploy peerpod with type annotation not supported on " + provider)
}
annotations["INSTANCESIZE"] = val
g.By("Deploying pod with kata runtime and verify it")
podName, err := createKataPodAnnotated(oc, podNs, podAnnotatedTemplate, basePodName, kataconfig.runtimeClassName, testrun.workloadImage, annotations)
defer deleteKataResource(oc, "pod", podNs, podName)
o.Expect(err).NotTo(o.HaveOccurred())
actualSize, err := getPeerPodMetadataInstanceType(oc, podNs, podName, provider)
e2e.Logf("Podvm with required instance type %v was launched as %v", instanceSize[provider], actualSize)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Failed rsh to pod %v to provide metadata: %v", podName, err))
o.Expect(actualSize).To(o.Equal(instanceSize[provider]), fmt.Sprintf("Instance size doesn't match provided annotations: %v", err))
g.By("SUCCESS - Podvm with required instance type was launched")
})
g.It("Author:vvoronko-High-69589-deploy kata with cpu and memory annotation", func() {
oc.SetupProject()
var (
basePodName = "-example-69589"
podNs = oc.Namespace()
annotations = map[string]string{
"MEMORY": "1234",
"CPU": "2",
"INSTANCESIZE": "",
}
supportedProviders = []string{"azure", "gcp", "none"}
memoryOptions = fmt.Sprintf("-m %vM", annotations["MEMORY"])
)
provider := getCloudProvider(oc)
if kataconfig.enablePeerPods || !slices.Contains(supportedProviders, provider) {
g.Skip("69589-deploy kata with type annotation supported only for kata runtime on platforms with nested virtualization enabled")
}
g.By("Deploying pod with kata runtime and verify it")
podName, err := createKataPodAnnotated(oc, podNs, podAnnotatedTemplate, basePodName, kataconfig.runtimeClassName, testrun.workloadImage, annotations)
defer deleteKataResource(oc, "pod", podNs, podName)
o.Expect(err).NotTo(o.HaveOccurred())
//get annotations from the live pod
podAnnotations, _ := oc.Run("get").Args("pods", podName, "-o=jsonpath={.metadata.annotations}", "-n", podNs).Output()
podCmd := []string{"-n", oc.Namespace(), podName, "--", "nproc"}
//check CPU available from the kata pod itself by nproc command:
actualCPU, err := oc.WithoutNamespace().AsAdmin().Run("exec").Args(podCmd...).Output()
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("'oc exec %v' Failed", podCmd))
strErr := fmt.Sprintf("Actual CPU count for the pod %v isn't matching expected %v full annotations:\n%v", actualCPU, annotations["CPU"], podAnnotations)
o.Expect(actualCPU).To(o.Equal(annotations["CPU"]), strErr)
//check MEMORY from the node running kata VM:
nodeName, _ := exutil.GetPodNodeName(oc, podNs, podName)
cmd := "ps -ef | grep uuid | grep -v grep"
vmFlags, err := exutil.DebugNodeWithOptionsAndChroot(oc, nodeName, []string{"-q"}, "bin/sh", "-c", cmd)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Failed debug node to get qemu instance options"))
strErr = fmt.Sprintf("VM flags for the pod doesn't contain expected %v full annotations:\n%v", memoryOptions, podAnnotations)
o.Expect(vmFlags).To(o.ContainSubstring(memoryOptions), strErr)
g.By("SUCCESS - KATA pod with required VM instance size was launched")
})
g.It("Author:abhbaner-High-66123-podvm Image ID check peer pods", func() {
var (
msg string
err error
imageID string
)
if !kataconfig.enablePeerPods {
g.Skip("OCP-66123 is only for peerpods")
}
oc.SetupProject()
cloudPlatform := getCloudProvider(oc)
// check if IMAGE ID exists in peer-pod-cm
msg, err, imageID = CheckPodVMImageID(oc, ppConfigMapName, cloudPlatform, opNamespace)
if imageID == "" {
e2e.Logf("IMAGE ID: %v", imageID)
msgIfErr := fmt.Sprintf("ERROR: IMAGE ID could not be retrieved from the peer-pods-cm even after kataconfig install: %v %v %v", imageID, msg, err)
o.Expect(imageID).NotTo(o.BeEmpty(), msgIfErr)
o.Expect(err).NotTo(o.HaveOccurred(), msgIfErr)
}
e2e.Logf("The Image ID present in the peer-pods-cm is: %v , msg: %v", imageID, msg)
g.By("SUCCESS - IMAGE ID check complete")
})
g.It("Author:tbuskey-Medium-70824-Catalog upgrade osc operator [Disruptive]", func() {
g.Skip("Upgrade tests should be manually done")
upgradeCatalog := UpgradeCatalogDescription{
name: "osc-config-upgrade-catalog",
namespace: "default",
exists: false,
imageAfter: "",
imageBefore: "",
catalogName: subscription.catalogSourceName,
}
err := getUpgradeCatalogConfigMap(oc, &upgradeCatalog)
if !upgradeCatalog.exists {
skipMessage := fmt.Sprintf("%v configmap for Catalog upgrade does not exist", upgradeCatalog.name)
g.Skip(skipMessage)
}
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("ERROR: could not get %v configmap in ns %v %v", upgradeCatalog.name, upgradeCatalog.namespace, err))
// what is the current CSV name?
csvNameBefore, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", subscription.subName, "-n", subscription.namespace, "-o=jsonpath={.status.currentCSV}").Output()
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("ERROR: could not get the CSV name of sub %v %v %v", subscription.subName, csvNameBefore, err))
o.Expect(csvNameBefore).NotTo(o.BeEmpty(), fmt.Sprintf("ERROR: the csv name is empty for sub %v", subscription.subName))
// what is the controller-manager pod name?
listOfPodsBefore, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", subscription.namespace, "-l", "control-plane=controller-manager", "-o=jsonpath={.items..metadata.name}").Output()
err = changeCatalogImage(oc, upgradeCatalog.catalogName, upgradeCatalog.imageAfter)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("ERROR: could not change catalog %v image to %v Error %v", upgradeCatalog.catalogName, upgradeCatalog.imageAfter, err))
e2e.Logf("Waiting for pods (%v) to get replaced", listOfPodsBefore)
waitForPodsToTerminate(oc, subscription.namespace, listOfPodsBefore)
// subscription .status.installedCsv is "AtLatestKnown" & will not change, so it doesn't show that the subscription is done
// wait until the currentCSV in the sub changes & get the new CSV name
csvNameAfter, _ := checkResourceJsonPathChanged(oc, "sub", subscription.subName, subscription.namespace, "-o=jsonpath={.status.currentCSV}", csvNameBefore, 300*time.Second, 10*time.Second)
e2e.Logf("Watch CSV %v to show Succeed", csvNameAfter)
_, _ = checkResourceJsonpath(oc, "csv", csvNameAfter, subscription.namespace, "-o=jsonpath={.status.phase}", "Succeeded", 300*time.Second, 10*time.Second)
})
g.It("Author:vvoronko-High-C00210-run [peerpodGPU] cuda-vectoradd", func() {
oc.SetupProject()
var (
basePodName = "-example-00210"
cudaImage = "nvidia/samples:vectoradd-cuda11.2.1"
podNs = oc.Namespace()
instanceSize = map[string]string{
"aws": "g5.2xlarge",
"azure": "Standard_NC8as_T4_v3",
}
phase = "Succeeded"
logPassed = "Test PASSED"
)
if !(kataconfig.enablePeerPods && testrun.enableGPU) {
g.Skip("210-run peerpod with GPU cuda-vectoradd supported only with GPU enabled in podvm")
}
instance := instanceSize[getCloudProvider(oc)]
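// The cuda-vectoradd sample is expected to reach the Succeeded phase on a GPU-capable peer-pod instance and to log "Test PASSED".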
g.By("Deploying pod with kata runtime and verify it")
newPodName := getRandomString() + basePodName
configFile, err := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", podAnnotatedTemplate,
"-p", "NAME="+newPodName,
"-p", "RUNTIMECLASSNAME="+kataconfig.runtimeClassName,
"-p", "INSTANCESIZE="+instance,
"-p", "IMAGE="+cudaImage).OutputToFile(getRandomString() + "Pod-common.json")
o.Expect(err).NotTo(o.HaveOccurred())
podName, err := createKataPodFromTemplate(oc, podNs, newPodName, configFile, kataconfig.runtimeClassName, phase)
defer deleteKataResource(oc, "pod", podNs, podName)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("vectoradd-cuda on peer pod with GPU instance type %v reached %v phase", instance, phase)
//verify the log of the pod
log, err := exutil.GetSpecificPodLogs(oc, podNs, "", podName, "")
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("unable to get the pod (%s) logs", podName))
o.Expect(log).To(o.ContainSubstring(logPassed), "required lines are missing in log")
g.By("SUCCESS - Podvm with GPU instance type was launched successfully")
})
g.It("Author:Anjana-High-43221-Verify PodVM image creation job completion", func() {
if getCloudProvider(oc) != "libvirt" {
g.Skip("43221 PodVM image creation job is specific to libvirt")
}
if !kataconfig.enablePeerPods {
g.Skip("43221 PodVM image creation job is only for peer pods")
}
g.By("Checking the status of the PodVM image creation job")
msg, err := verifyImageCreationJobSuccess(oc, opNamespace, ppParam, ppParamsLibvirtConfigMapName, cloudPlatform)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("%v", err))
expMsg := "Uploaded the image successfully"
o.Expect(strings.Contains(msg, expMsg)).To(o.BeTrue(), fmt.Sprintf("Expected message: %v not found in the job output.", expMsg))
g.By("SUCCESS - PodVM image creation job completed successfully")
})
g.It("Author:Anjana-High-422081-Verify SE-enabled pod deployment", func() {
if getCloudProvider(oc) != "libvirt" {
g.Skip("422081 SE-enabled pod deployment is specific to libvirt")
}
oc.SetupProject()
var (
msg string
err error
defaultPodName = "-se-check"
podNs = oc.Namespace()
)
g.By("Deploying pod to verify SE enablement")
newPodName := createKataPod(oc, podNs, defaultPod, defaultPodName, kataconfig.runtimeClassName, testrun.workloadImage)
defer func() {
deleteKataResource(oc, "pod", podNs, newPodName)
g.By("Deleted SE-enabled pod")
}()
msg, err = checkResourceJsonpath(oc, "pod", newPodName, podNs, "-o=jsonpath={.status.phase}", podRunState, podSnooze*time.Second, 10*time.Second)
if err != nil {
e2e.Logf("ERROR: pod %v could not be installed: %v %v", newPodName, msg, err)
o.Expect(err).NotTo(o.HaveOccurred())
}
g.By("SUCCESS - Pod installed for SE verification")
g.By("Checking if pod is SE-enabled")
err = checkSEEnabled(oc, newPodName, podNs)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("%v", err))
})
g.It("Author:tbuskey-High-C00316-run and verify cosigned pod", func() {
if testrun.workloadToTest != "coco" {
g.Skip("Run and verify cosigned pod is only for workloadToTest = 'coco'")
}
oc.SetupProject()
var (
podName = "ocp-cc-pod"
testNamespace = oc.Namespace()
podLastEventReason string
loopCount int
loopMax = 450
countIncrement = 15
sleepTime = time.Duration(countIncrement) * time.Second
outputFromOc string
)
defer deleteResource(oc, "pod", podName, testNamespace, 90*time.Second, 10*time.Second)
msg, err := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", trusteeCosignedPodFile, "-n", testNamespace).Output()
if err != nil {
e2e.Logf("Error: applying cosigned pod file %v failed: %v %v", trusteeCosignedPodFile, msg, err)
}
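// Poll the namespace events until the pod reports a Started event; bail out early if a Failed event shows up or the loopMax timeout is reached.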
for !strings.Contains(podLastEventReason, "Started") && loopCount < loopMax {
loopCount = loopCount + countIncrement
outputFromOc, err = oc.AsAdmin().WithoutNamespace().Run("events").Args("-o=jsonpath={.items..reason}", "-n", testNamespace).Output()
splitString := strings.Split(outputFromOc, " ")
podLastEventReason = splitString[len(splitString)-1]
e2e.Logf("%v pod event reason: %v", podName, podLastEventReason)
if strings.Contains(outputFromOc, "Failed") || loopCount >= loopMax {
err = fmt.Errorf("pod %v failed err: %v timeout: %v of %v\n\n", podName, err, loopCount, loopMax)
}
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("ERROR: %v", err))
time.Sleep(sleepTime)
}
})
g.It("Author:vvoronko-High-C00317-delete operator with running workload [Serial]", func() {
oc.SetupProject()
var (
msg string
err error
defaultPodName = "-example-00317"
podNs = oc.Namespace()
)
g.By("Deploying pod with kata runtime and verify it")
fstPodName := createKataPod(oc, podNs, defaultPod, defaultPodName, kataconfig.runtimeClassName, testrun.workloadImage)
defer deleteKataResource(oc, "pod", podNs, fstPodName)
msg, err = checkResourceJsonpath(oc, "pod", fstPodName, podNs, "-o=jsonpath={.status.phase}", podRunState, podSnooze*time.Second, 10*time.Second)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("ERROR: pod %v could not be installed: %v %v", fstPodName, msg, err))
g.By("delete csv and sub")
msg, err = deleteOperator(oc, subscription)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("msg:%v err:%v", msg, err))
g.By("verify control plane pods are running")
if kataconfig.enablePeerPods {
msg, err = testControlPod(oc, subscription.namespace, "daemonset", "peerpodconfig-ctrl-caa-daemon",
"-o=jsonpath={.status.desiredNumberScheduled}", "-o=jsonpath={.status.numberReady}", "name=peerpodconfig-ctrl-caa-daemon")
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("msg:%v err:%v", msg, err))
msg, err = testControlPod(oc, subscription.namespace, "deployment", "peer-pods-webhook",
"-o=jsonpath={.spec.replicas}", "-o=jsonpath={.status.readyReplicas}", "app=peer-pods-webhook")
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("msg:%v err:%v", msg, err))
}
msg, err = testControlPod(oc, subscription.namespace, "daemonset", "openshift-sandboxed-containers-monitor",
"-o=jsonpath={.status.desiredNumberScheduled}", "-o=jsonpath={.status.numberReady}", "name=openshift-sandboxed-containers-monitor")
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("msg:%v err:%v", msg, err))
g.By("monitor the 1st pod is still running")
msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", fstPodName, "-n", podNs, "-o=jsonpath={.status.phase}").Output()
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("ERROR: pod %v is not in expected state: %v, actual is: %v %v", fstPodName, podRunState, msg, err))
//launch another pod
secPodName := createKataPod(oc, podNs, defaultPod, defaultPodName, kataconfig.runtimeClassName, testrun.workloadImage)
defer deleteKataResource(oc, "pod", podNs, secPodName)
msg, err = checkResourceJsonpath(oc, "pod", secPodName, podNs, "-o=jsonpath={.status.phase}", podRunState, podSnooze*time.Second, 10*time.Second)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("ERROR: pod %v could not be installed: %v %v", secPodName, msg, err))
g.By("SUCCESS - operator deleted while workload keep running")
})
g.It("Author:vvoronko-High-C00999-deploy peerpod with tags", func() {
if !(testrun.workloadToTest == "peer-pods" && getCloudProvider(oc) == "azure") {
g.Skip("Test supported only with peer-pods on Azure since AWS tags disabled for metadata by default")
}
oc.SetupProject()
var (
basePodName = "-example-00999"
podNs = oc.Namespace()
//works with default configmap value
tagValue = map[string]string{
"aws": "value1",
"azure": "key1:value1;key2:value2", //format is different than in configmap
}
)
provider := getCloudProvider(oc)
g.By("Deploying pod with kata runtime and verify it")
newPodName := createKataPod(oc, podNs, defaultPod, basePodName, kataconfig.runtimeClassName, testrun.workloadImage)
defer deleteKataResource(oc, "pod", podNs, newPodName)
msg, err := checkResourceJsonpath(oc, "pod", newPodName, podNs, "-o=jsonpath={.status.phase}", podRunState, podSnooze*time.Second, 10*time.Second)
if err != nil {
e2e.Logf("ERROR: pod %v could not be installed: %v %v", newPodName, msg, err)
o.Expect(err).NotTo(o.HaveOccurred())
}
actualValue, err := getPeerPodMetadataTags(oc, podNs, newPodName, provider)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Failed rsh to pod %v to provide metadata: %v", newPodName, err))
e2e.Logf("%v pod tags: %v", newPodName, actualValue)
o.Expect(actualValue).To(o.ContainSubstring(tagValue[provider]), fmt.Sprintf("Pod tags don't match the configured values: %v", err))
g.By("SUCCESS - Podvm with required instance type was launched")
})
})
| package kata | ||||
test case | openshift/openshift-tests-private | b7248bdf-7f94-4b70-b115-77971b599c02 | Author:abhbaner-High-39499-Operator installation | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Author:abhbaner-High-39499-Operator installation", func() {
g.By("Checking sandboxed-operator operator installation")
_, err := subscriptionIsFinished(oc, subscription)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("SUCCESSS - sandboxed-operator operator installed")
}) | ||||||
test case | openshift/openshift-tests-private | a3094595-04d7-483f-9d31-0d01b9b80b09 | Author:abhbaner-High-43522-Common Kataconfig installation | ['"fmt"', '"github.com/tidwall/gjson"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Author:abhbaner-High-43522-Common Kataconfig installation", func() {
g.Skip("test require structure rework")
g.By("Install Common kataconfig and verify it")
e2e.Logf("common kataconfig %v is installed", kataconfig.name)
err := checkKataconfigIsCreated(oc, subscription, kataconfig.name)
if err != nil {
e2e.Failf("ERROR: kataconfig install failed: %v", err)
}
/* kataconfig status changed so this does not work.
These checks should be moved to a function
nodeKataList := getAllKataNodes(oc, kataconfig.eligibility, subscription.namespace, featureLabel, customLabel)
o.Expect(len(nodeKataList) > 0).To(o.BeTrue())
nodeKataCount := fmt.Sprintf("%d", len(nodeKataList))
jsonKataStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("kataconfig", kataconfig.name, "-o=jsonpath={.status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
totalCount := gjson.Get(jsonKataStatus, "totalNodesCount").String()
o.Expect(totalCount).To(o.Equal(nodeKataCount))
completeCount := gjson.Get(jsonKataStatus, "installationStatus.completed.completedNodesCount").String()
o.Expect(totalCount).To(o.Equal(completeCount))
completededListCount := gjson.Get(jsonKataStatus, "installationStatus.completed.completedNodesList.#").String()
o.Expect(completededListCount == totalCount)
e2e.Logf("Completed nodes are %v", gjson.Get(jsonKataStatus, "installationStatus.completed.completedNodesList").String())
g.By("SUCCESSS - kataconfig installed and it's structure is verified")
*/
}) | |||||
test case | openshift/openshift-tests-private | 566938d8-140c-4eb6-8668-3d209a74ce7a | Author:tbuskey-High-66108-Version in operator CSV should match expected version | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Author:tbuskey-High-66108-Version in operator CSV should match expected version", func() {
if !testrun.checked {
g.Skip("osc-config cm or OSCSOPERATORVER are not set so there is no expected version to compare")
}
var (
err error
csvName string
csvVersion string
)
csvName, err = oc.AsAdmin().Run("get").Args("sub", subscription.subName, "-n", subscription.namespace, "-o=jsonpath={.status.installedCSV}").Output()
if err != nil || csvName == "" {
e2e.Logf("Error: Not able to get csv from sub %v: %v %v", subscription.subName, csvName, err)
}
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(csvName).NotTo(o.BeEmpty())
csvVersion, err = oc.AsAdmin().Run("get").Args("csv", csvName, "-n", subscription.namespace, "-o=jsonpath={.spec.version}").Output()
if err != nil || csvVersion == "" {
e2e.Logf("Error: Not able to get version from csv %v: %v %v", csvName, csvVersion, err)
}
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(csvVersion).NotTo(o.BeEmpty())
cleanVer := strings.Split(testrun.operatorVer, "-")
if csvVersion != cleanVer[0] {
e2e.Logf("Error: expecting %v but CSV has %v", testrun.operatorVer, csvVersion)
}
o.Expect(csvVersion).To(o.Equal(cleanVer[0]))
}) | |||||
test case | openshift/openshift-tests-private | fb8f66a0-1421-4bb3-bcb9-523e37d4e296 | Author:tbuskey-Medium-63122-Checking if cluster is ready for peer pods | ['"fmt"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Author:tbuskey-Medium-63122-Checking if cluster is ready for peer pods", func() {
// can't *VERIFY* all values but we can ensure the cm/secret variables were added by the users
if !kataconfig.enablePeerPods {
g.Skip("STEP Peer pods are not enabled with osc-config or OSCSENABLEPEERPODS")
}
var (
err error
msg string
errors = 0
errorList = []string{""}
)
// set the CLOUD_PROVIDER value from the peerpods configmap
cloudProvider, err := oc.AsAdmin().Run("get").Args("cm", ppConfigMapName, "-n", subscription.namespace, "-o=jsonpath={.data.CLOUD_PROVIDER}").Output()
if err != nil || strings.Contains(cloudProvider, "not found") {
e2e.Logf("STEP ERROR: peerpod configmap issue %v %v", cloudProvider, err)
o.Expect(err).NotTo(o.HaveOccurred())
}
if len(cloudProvider) == 0 {
e2e.Logf("STEP ERROR: CLOUD_PROVIDER is not set on peerpod config")
o.Expect(cloudProvider).ToNot(o.BeZero())
}
msg = fmt.Sprintf("checking %v ", ppSecretName)
g.By(msg)
msg, err = checkPeerPodSecrets(oc, subscription.namespace, cloudProvider, ppSecretName)
if err != nil {
e2e.Logf("%v", msg)
errors = errors + 1
errorList = append(errorList, msg)
}
msg = fmt.Sprintf("checking %v ", ppConfigMapName)
g.By(msg)
msg, err = checkPeerPodConfigMap(oc, subscription.namespace, cloudProvider, ppConfigMapName)
if err != nil {
e2e.Logf("%v", msg)
errors = errors + 1
errorList = append(errorList, msg)
}
g.By("Verify enablePeerPods is set in kataconfig")
msg, err = oc.AsAdmin().Run("get").Args("kataconfig", kataconfig.name, "-n", subscription.namespace, "-o=jsonpath={.spec.enablePeerPods}").Output()
if err != nil || msg != "true" {
e2e.Logf("STEP ERROR querying kataconfig %v and enablePeerPods setting", kataconfig.name)
errors = errors + 1
errorList = append(errorList, msg)
}
msg = fmt.Sprintf("check runtimeclass for %v", ppRuntimeClass)
g.By(msg)
msg, err = oc.AsAdmin().Run("get").Args("runtimeclass", "-n", subscription.namespace, "--no-headers").Output()
if err != nil || !strings.Contains(msg, ppRuntimeClass) {
e2e.Logf("STEP ERROR runtimeclass %v not found", ppRuntimeClass, msg, err)
errors = errors + 1
errorList = append(errorList, msg)
}
g.By("Check errors")
if errors != 0 {
e2e.Logf("STEP ERROR: %v error areas:\n %v", errors, errorList)
}
o.Expect(errors).To(o.BeZero())
g.By("SUCCESS - cluster has cm and secrets for peerpods")
}) | |||||
test case | openshift/openshift-tests-private | 6fcf6c44-2fab-48d4-9377-430d7d73ac55 | Author:abhbaner-High-41566-High-41574-deploy & delete a pod with kata runtime | ['"time"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Author:abhbaner-High-41566-High-41574-deploy & delete a pod with kata runtime", func() {
oc.SetupProject()
var (
msg string
err error
defaultPodName = "-example-41566"
podNs = oc.Namespace()
)
g.By("Deploying pod with kata runtime and verify it")
newPodName := createKataPod(oc, podNs, defaultPod, defaultPodName, kataconfig.runtimeClassName, testrun.workloadImage)
defer deleteKataResource(oc, "pod", podNs, newPodName)
msg, err = checkResourceJsonpath(oc, "pod", newPodName, podNs, "-o=jsonpath={.status.phase}", podRunState, podSnooze*time.Second, 10*time.Second)
if err != nil {
e2e.Logf("ERROR: pod %v could not be installed: %v %v", newPodName, msg, err)
o.Expect(err).NotTo(o.HaveOccurred())
}
g.By("SUCCESS - Pod with kata runtime installed")
}) | |||||
test case | openshift/openshift-tests-private | 9423b35a-68c3-48c0-8677-e9d9be5e9479 | Author:tbuskey-High-43238-Operator prohibits creation of multiple kataconfigs | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Author:tbuskey-High-43238-Operator prohibits creation of multiple kataconfigs", func() {
var (
kataConfigName2 = kataconfig.name + "2"
configFile string
msg string
err error
expectError = "KataConfig instance already exists, refusing to create a duplicate"
)
g.By("Create 2nd kataconfig file")
configFile, err = oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", kcTemplate, "-p", "NAME="+kataConfigName2, "-n", subscription.namespace).OutputToFile(getRandomString() + "kataconfig-common.json")
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("the file of resource is %s", configFile)
g.By("Apply 2nd kataconfig")
//Error from server (A KataConfig instance already exists, refusing to create a duplicate): error when creating "kataconfig2.yaml":
// admission webhook "vkataconfig.kb.io" denied the request: A KataConfig instance already exists, refusing to create a duplicate
msg, err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", configFile).Output()
o.Expect(err).To(o.HaveOccurred())
o.Expect(msg).To(o.ContainSubstring(expectError))
g.By("Success - cannot apply 2nd kataconfig")
}) | ||||||
test case | openshift/openshift-tests-private | a0a1437c-c7e1-42dc-9b9b-9065f370bee7 | Author:abhbaner-High-41263-Namespace check | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Author:abhbaner-High-41263-Namespace check", func() {
g.By("Checking if ns 'openshift-sandboxed-containers-operator' exists")
msg, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("namespaces", subscription.namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(msg).To(o.ContainSubstring(subscription.namespace))
g.By("SUCCESS - Namespace check complete")
}) | ||||||
test case | openshift/openshift-tests-private | 98b69142-b2ca-42b1-9ea6-50bd260f931b | Author:abhbaner-High-43620-validate podmetrics for pod running kata | ['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Author:abhbaner-High-43620-validate podmetrics for pod running kata", func() {
if kataconfig.enablePeerPods {
g.Skip("skipping. metrics are not available on pods with Peer Pods enabled")
}
oc.SetupProject()
var (
msg string
err error
defaultPodName = "example"
podNs = oc.Namespace()
)
g.By("Deploying pod with kata runtime and verify it")
newPodName := createKataPod(oc, podNs, defaultPod, defaultPodName, kataconfig.runtimeClassName, testrun.workloadImage)
defer deleteKataResource(oc, "pod", podNs, newPodName)
msg, err = checkResourceJsonpath(oc, "pod", newPodName, podNs, "-o=jsonpath={.status.phase}", podRunState, podSnooze*time.Second, 10*time.Second)
if err != nil {
e2e.Logf("ERROR: %v %v", msg, err)
}
errCheck := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
podMetrics, err := oc.AsAdmin().Run("describe").Args("podmetrics", newPodName, "-n", podNs).Output()
if err != nil {
return false, nil
}
e2e.Logf("Pod metrics output below \n %s ", podMetrics)
o.Expect(podMetrics).To(o.ContainSubstring("Cpu"))
o.Expect(podMetrics).To(o.ContainSubstring("Memory"))
o.Expect(podMetrics).To(o.ContainSubstring("Events"))
return true, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("can not describe podmetrics %v in ns %v", newPodName, podNs))
g.By("SUCCESS - Podmetrics for pod with kata runtime validated")
g.By("TEARDOWN - deleting the kata pod")
}) | |||||
test case | openshift/openshift-tests-private | c25eea43-d01c-43de-8d34-72372e6ec0f6 | Author:abhbaner-High-43617-High-43616-CLI checks pod logs & fetching pods in podNs | ['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Author:abhbaner-High-43617-High-43616-CLI checks pod logs & fetching pods in podNs", func() {
if testrun.workloadToTest == "coco" {
g.Skip("Test not supported with coco")
}
oc.SetupProject()
var (
msg string
err error
defaultPodName = "example"
podNs = oc.Namespace()
)
g.By("Deploying pod with kata runtime and verify it")
newPodName := createKataPod(oc, podNs, defaultPod, defaultPodName, kataconfig.runtimeClassName, testrun.workloadImage)
defer deleteKataResource(oc, "pod", podNs, newPodName)
msg, err = checkResourceJsonpath(oc, "pod", newPodName, podNs, "-o=jsonpath={.status.phase}", podRunState, podSnooze*time.Second, 10*time.Second)
e2e.Logf("Pod (with Kata runtime) with name - %v , is installed: %v %v", newPodName, msg, err)
errCheck := wait.Poll(10*time.Second, 200*time.Second, func() (bool, error) {
podlogs, err := oc.AsAdmin().Run("logs").Args("pod/"+newPodName, "-n", podNs).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(podlogs).NotTo(o.BeEmpty())
if strings.Contains(podlogs, "serving on") {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Pod %v logs are not getting generated", newPodName))
g.By("SUCCESS - Logs for pods with kata validated")
g.By("TEARDOWN - deleting the kata pod")
}) | |||||
test case | openshift/openshift-tests-private | a21388dc-c1c4-4054-b7db-ed470efd7ee3 | Author:abhbaner-High-43514-kata pod displaying correct overhead | ['"strings"', '"time"', '"github.com/tidwall/gjson"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Author:abhbaner-High-43514-kata pod displaying correct overhead", func() {
const (
defaultPodName = "example"
ppWebhookDeploymentName = "peer-pods-webhook"
ppVMExtendedResourceEnv = "POD_VM_EXTENDED_RESOURCE"
expPPVmExtendedResourceLimit = "1"
expPPVExtendedResourceRequest = "1"
)
oc.SetupProject()
podNs := oc.Namespace()
g.By("Deploying pod with kata runtime")
newPodName := createKataPod(oc, podNs, defaultPod, defaultPodName, kataconfig.runtimeClassName, testrun.workloadImage)
defer deleteKataResource(oc, "pod", podNs, newPodName)
g.By("Verifying pod state")
msg, err := checkResourceJsonpath(oc, "pod", newPodName, podNs, "-o=jsonpath={.status.phase}", podRunState, podSnooze*time.Second, 10*time.Second)
if err != nil {
e2e.Logf("ERROR: unable to get podState %v of %v in namespace %v %v %v", podRunState, newPodName, podNs, msg, err)
}
kataPodObj, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", newPodName, "-n", podNs, "-o=json").Output()
if err != nil {
e2e.Logf("ERROR: unable to get pod: %v in namepsace: %v - error: %v", newPodName, podNs, err)
}
o.Expect(err).NotTo(o.HaveOccurred())
// peerpod webhook erases the pod overhead
g.By("Checking peerpod resources")
if kataconfig.enablePeerPods {
g.By("Fetching peer POD_VM_EXTENDED_RESOURCE defaults from peer-pods-webhook pod")
ppVMResourceDefaults, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment", ppWebhookDeploymentName, "-n", subscription.namespace, "-o=jsonpath={.spec.template.spec.containers[?(@.name=='"+ppWebhookDeploymentName+"')].env[?(@.name=='"+ppVMExtendedResourceEnv+"')].value}").Output()
if err != nil {
e2e.Logf("ERROR: unable to get peerpod webhook deployment: %v in namepsace: %v - error: %v", ppWebhookDeploymentName, subscription.namespace, err)
}
o.Expect(err).ToNot(o.HaveOccurred())
gjson.Get(kataPodObj, "spec.containers").ForEach(func(key, container gjson.Result) bool {
e2e.Logf("checking container: %s on pod: %s in namespace: %s ", gjson.Get(container.String(), "name").String(), newPodName, podNs)
ppVMResourceDefaults := strings.Replace(ppVMResourceDefaults, ".", "\\.", -1)
actualResourceLimit := gjson.Get(container.String(), "resources.limits."+ppVMResourceDefaults).String()
if strings.Compare(actualResourceLimit, expPPVmExtendedResourceLimit) != 0 {
e2e.Logf("ERROR: peerpod: %v in namepsace: %v has incorrect pod VM extended resource limit: %v", newPodName, podNs, actualResourceLimit)
}
o.Expect(actualResourceLimit).To(o.Equal(expPPVmExtendedResourceLimit))
actualResourceRequest := gjson.Get(container.String(), "resources.requests."+ppVMResourceDefaults).String()
if strings.Compare(actualResourceRequest, expPPVExtendedResourceRequest) != 0 {
e2e.Logf("ERROR: peerpod: %v in namepsace: %v has incorrect pod VM extended resource request: %v", newPodName, podNs, actualResourceRequest)
}
o.Expect(actualResourceRequest).To(o.Equal(expPPVExtendedResourceRequest))
return true
})
}
g.By("Checking Kata pod overhead")
// for non-peer kata pods, overhead is expected to be same as set in runtimeclass
runtimeClassObj, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("runtimeclass", kataconfig.runtimeClassName, "-o=json").Output()
if err != nil {
e2e.Logf("ERROR: unable to get runtimeclass: %v - error: %v", kataconfig.runtimeClassName, err)
}
o.Expect(err).NotTo(o.HaveOccurred())
actualCpu := gjson.Get(kataPodObj, "spec.overhead.cpu").String()
expectedCpu := gjson.Get(runtimeClassObj, "overhead.podFixed.cpu").String()
if strings.Compare(expectedCpu, actualCpu) != 0 {
e2e.Logf("ERROR: kata pod: %v in namepsace: %v has incorrect cpu overhead: %v", newPodName, podNs, actualCpu)
}
o.Expect(expectedCpu).To(o.Equal(actualCpu))
actualMem := gjson.Get(kataPodObj, "spec.overhead.memory").String()
expectedMem := gjson.Get(runtimeClassObj, "overhead.podFixed.memory").String()
if strings.Compare(expectedMem, actualMem) != 0 {
e2e.Logf("ERROR: kata pod: %v in namepsace: %v has incorrect memory overhead: %v", newPodName, podNs, actualMem)
}
o.Expect(expectedMem).To(o.Equal(actualMem))
g.By("SUCCESS - kata pod overhead verified")
g.By("TEARDOWN - deleting the kata pod")
}) | |||||
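The overhead verification above reduces to comparing two JSON documents field by field. A minimal standalone sketch of that comparison, assuming the pod and RuntimeClass objects are available as raw JSON (the helper name checkKataOverhead and the example overhead values are hypothetical):
package main
import (
	"fmt"
	"github.com/tidwall/gjson"
)
// checkKataOverhead compares a pod's spec.overhead to the RuntimeClass
// overhead.podFixed values; both arguments are raw JSON strings.
func checkKataOverhead(podJSON, runtimeClassJSON string) error {
	for _, field := range []string{"cpu", "memory"} {
		actual := gjson.Get(podJSON, "spec.overhead."+field).String()
		expected := gjson.Get(runtimeClassJSON, "overhead.podFixed."+field).String()
		if actual != expected {
			return fmt.Errorf("overhead %s mismatch: pod has %q, runtimeclass declares %q", field, actual, expected)
		}
	}
	return nil
}
func main() {
	// illustrative values only; real overheads come from the installed runtimeclass
	pod := `{"spec":{"overhead":{"cpu":"250m","memory":"350Mi"}}}`
	rc := `{"overhead":{"podFixed":{"cpu":"250m","memory":"350Mi"}}}`
	fmt.Println(checkKataOverhead(pod, rc))
}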
test case | openshift/openshift-tests-private | d6ba3d02-3f6f-4d09-9f6a-08d330574a7a | Author:tbuskey-High-43619-oc admin top pod metrics works for pods that use kata runtime | ['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Author:tbuskey-High-43619-oc admin top pod metrics works for pods that use kata runtime", func() {
if kataconfig.enablePeerPods {
g.Skip("skipping. metrics are not in oc admin top pod with Peer Pods enabled")
}
oc.SetupProject()
var (
podNs = oc.Namespace()
podName string
err error
msg string
waitErr error
metricCount = 0
)
g.By("Deploy a pod with kata runtime")
podName = createKataPod(oc, podNs, defaultPod, "admtop", kataconfig.runtimeClassName, testrun.workloadImage)
defer deleteKataResource(oc, "pod", podNs, podName)
msg, err = checkResourceJsonpath(oc, "pod", podName, podNs, "-o=jsonpath={.status.phase}", podRunState, podSnooze*time.Second, 10*time.Second)
g.By("Get oc top adm metrics for the pod")
snooze = 360
waitErr = wait.Poll(10*time.Second, snooze*time.Second, func() (bool, error) {
msg, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("top", "pod", "-n", podNs, podName, "--no-headers").Output()
if err == nil { // Will get error with msg: error: metrics not available yet
metricCount = len(strings.Fields(msg))
}
if metricCount == 3 {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(waitErr, "metrics never appeared")
if metricCount == 3 {
e2e.Logf("metrics for pod %v", msg)
}
o.Expect(metricCount).To(o.Equal(3))
g.By("Success")
}) | |||||
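The test above treats metrics as available once `oc adm top pod <name> --no-headers` returns exactly three whitespace-separated columns (pod name, CPU, memory). A small sketch of that parsing step; the sample output line is an assumption, real values depend on the workload.
package main
import (
	"fmt"
	"strings"
)
// parseTopPodLine splits one `--no-headers` line from `oc adm top pod` into
// its three expected columns: pod name, CPU usage, memory usage.
func parseTopPodLine(line string) (name, cpu, mem string, err error) {
	fields := strings.Fields(line)
	if len(fields) != 3 {
		return "", "", "", fmt.Errorf("expected 3 columns, got %d: %q", len(fields), line)
	}
	return fields[0], fields[1], fields[2], nil
}
func main() {
	// illustrative output line
	name, cpu, mem, err := parseTopPodLine("admtopabcde 2m 38Mi")
	fmt.Println(name, cpu, mem, err)
}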
test case | openshift/openshift-tests-private | a0d7fffe-c2c9-4664-b8df-056a8231914a | Author:abhbaner-High-43516-operator is available in CatalogSource | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Author:abhbaner-High-43516-operator is available in CatalogSource", func() {
g.By("Checking catalog source for the operator")
opMarketplace, err := oc.AsAdmin().Run("get").Args("packagemanifests", "-n", "openshift-marketplace").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(opMarketplace).NotTo(o.BeEmpty())
o.Expect(opMarketplace).To(o.ContainSubstring("sandboxed-containers-operator"))
o.Expect(opMarketplace).To(o.ContainSubstring("Red Hat Operators"))
g.By("SUCCESS - 'sandboxed-containers-operator' is present in packagemanifests")
}) | ||||||
test case | openshift/openshift-tests-private | f0b7f74b-cf5d-4359-bced-9e2fde2bf2e7 | Author:tbuskey-High-43523-Monitor deletion[Disruptive][Serial][Slow] | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Author:tbuskey-High-43523-Monitor deletion[Disruptive][Serial][Slow]", func() {
g.By("Delete kataconfig and verify it")
msg, err := deleteKataConfig(oc, kataconfig.name)
e2e.Logf("kataconfig %v was deleted\n--------- %v %v", kataconfig.name, msg, err)
g.By("SUCCESS")
}) | ||||||
test case | openshift/openshift-tests-private | 20c1ab19-6355-40c3-b956-6cc27a4ea677 | Author:tbuskey-High-41813-Build Acceptance test with deletion[Disruptive][Serial][Slow] | ['"time"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Author:tbuskey-High-41813-Build Acceptance test with deletion[Disruptive][Serial][Slow]", func() {
g.Skip("kataconfig deletion steps are skipped")
//This test will install operator,kataconfig,pod with kata - delete pod, delete kataconfig
oc.SetupProject()
var (
msg string
err error
defaultPodName = "example"
podNs = oc.Namespace()
)
g.By("Deploying pod and verify it")
newPodName := createKataPod(oc, podNs, defaultPod, defaultPodName, kataconfig.runtimeClassName, testrun.workloadImage)
msg, err = checkResourceJsonpath(oc, "pod", newPodName, podNs, "-o=jsonpath={.status.phase}", podRunState, podSnooze*time.Second, 10*time.Second)
e2e.Logf("Pod (with Kata runtime) with name - %v , is installed: %v %v", newPodName, msg, err)
g.By("Deleting pod")
deleteKataResource(oc, "pod", podNs, newPodName)
g.By("Deleting kataconfig")
msg, err = deleteKataConfig(oc, kataconfig.name)
e2e.Logf("common kataconfig %v was deleted %v %v", kataconfig.name, msg, err)
g.By("SUCCESSS - build acceptance passed")
}) | |||||
test case | openshift/openshift-tests-private | 59952ba2-2585-4983-bb53-5eb3777a67a6 | Author:tbuskey-High-46235-Kata Metrics Verify that Namespace is labeled to enable monitoring | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Author:tbuskey-High-46235-Kata Metrics Verify that Namespace is labeled to enable monitoring", func() {
var (
err error
msg string
s string
label = ""
hasMetrics = false
)
g.By("Get labels of openshift-sandboxed-containers-operator namespace to check for monitoring")
msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("ns", "openshift-sandboxed-containers-operator", "-o=jsonpath={.metadata.labels}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
for _, s = range strings.SplitAfter(msg, ",") {
if strings.Contains(s, "openshift.io/cluster-monitoring") {
label = s
if strings.Contains(strings.SplitAfter(s, ":")[1], "true") {
hasMetrics = true
}
}
}
e2e.Logf("Label is %v", label)
o.Expect(hasMetrics).To(o.BeTrue())
g.By("Success")
}) | |||||
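The label check above splits the jsonpath output as a plain string. An alternative sketch (not what the test does) is to unmarshal the labels map directly, assuming `-o=jsonpath={.metadata.labels}` returns the JSON-encoded map, which is the same assumption the string splitting relies on:
package main
import (
	"encoding/json"
	"fmt"
)
// hasClusterMonitoring reports whether the namespace labels enable cluster monitoring.
func hasClusterMonitoring(labelsJSON string) (bool, error) {
	labels := map[string]string{}
	if err := json.Unmarshal([]byte(labelsJSON), &labels); err != nil {
		return false, err
	}
	return labels["openshift.io/cluster-monitoring"] == "true", nil
}
func main() {
	// illustrative labels payload for the operator namespace
	fmt.Println(hasClusterMonitoring(`{"openshift.io/cluster-monitoring":"true"}`))
}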
test case | openshift/openshift-tests-private | 9bf7829d-3cf5-406a-894f-8256bf751baf | Author:abhbaner-High-43524-Existing deployments (with runc) should restart normally after kata runtime install | ['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Author:abhbaner-High-43524-Existing deployments (with runc) should restart normally after kata runtime install", func() {
oc.SetupProject()
var (
podNs = oc.Namespace()
deployName = "dep-43524-" + getRandomString()
msg string
podName string
newPodName string
)
g.By("Create deployment config from template")
configFile, err := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", defaultDeployment,
"-p", "NAME="+deployName, "-p", "RUNTIMECLASSNAME="+kataconfig.runtimeClassName,
"-p", "IMAGE="+testrun.workloadImage).OutputToFile(getRandomString() + "dep-common.json")
if err != nil {
e2e.Logf("Could not create configFile %v %v", configFile, err)
}
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Applying deployment file " + configFile)
msg, err = oc.AsAdmin().Run("apply").Args("-f", configFile, "-n", podNs).Output()
if err != nil {
e2e.Logf("Could not apply configFile %v %v", msg, err)
}
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Wait for deployment to be ready")
defer oc.AsAdmin().Run("delete").Args("deploy", "-n", podNs, deployName, "--ignore-not-found").Execute()
msg, err = waitForDeployment(oc, podNs, deployName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(msg).NotTo(o.BeEmpty())
// If the deployment is ready, its pods should be too; this extra check may be redundant
g.By("Wait for pods to be ready")
errCheck := wait.Poll(10*time.Second, 600*time.Second, func() (bool, error) {
msg, err = oc.AsAdmin().Run("get").Args("pods", "-n", podNs, "--no-headers").Output()
if !strings.Contains(msg, "No resources found") {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Timed out waiting for pods %v %v", msg, err))
g.By("Get pod name")
msg, err = oc.AsAdmin().Run("get").Args("pods", "-n", podNs, "--no-headers").Output()
podName = strings.Split(msg, " ")[0]
e2e.Logf("podname %v %v", msg, err)
msg = fmt.Sprintf("Deleting pod %v from deployment", podName)
g.By(msg)
msg, err = deleteResource(oc, "pod", podName, podNs, podSnooze*time.Second, 10*time.Second)
e2e.Logf("%v pod deleted: %v %v", podName, msg, err)
g.By("Wait for deployment to re-replicate")
msg, err = waitForDeployment(oc, podNs, deployName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(msg).NotTo(o.BeEmpty())
g.By("Get new pod name")
msg, err = oc.AsAdmin().Run("get").Args("pods", "-n", podNs, "--no-headers").Output()
newPodName = strings.Split(msg, " ")[0]
e2e.Logf("new podname %v %v", msg, err)
if newPodName == podName {
e2e.Failf("A new pod did not get created")
}
g.By("SUCCESSS - kataconfig installed and post that pod with runc successfully restarted ")
msg, err = deleteResource(oc, "deploy", deployName, podNs, podSnooze*time.Second, 10*time.Second)
}) | |||||
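The test above leans on the repository's waitForDeployment helper, whose implementation is not shown in this section. A hedged sketch of what such a readiness wait could look like, written against a caller-supplied probe rather than the repo's actual helper:
package main
import (
	"fmt"
	"time"
	"k8s.io/apimachinery/pkg/util/wait"
)
// waitForReplicas polls a caller-supplied probe until all desired replicas
// report ready, or the timeout expires.
func waitForReplicas(probe func() (ready, desired int32, err error), interval, timeout time.Duration) error {
	return wait.Poll(interval, timeout, func() (bool, error) {
		ready, desired, err := probe()
		if err != nil {
			// treat probe errors as transient and keep polling
			return false, nil
		}
		return desired > 0 && ready == desired, nil
	})
}
func main() {
	// fake probe standing in for reading deployment status; reports ready at once
	probe := func() (int32, int32, error) { return 1, 1, nil }
	err := waitForReplicas(probe, time.Second, 30*time.Second)
	fmt.Println("deployment ready:", err == nil)
}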
test case | openshift/openshift-tests-private | 34bd4ddd-1019-4e03-bede-4361edd35bb8 | Longduration-NonPreRelease-Author:tbuskey-High-42167-Must-gather collects sandboxed operator logs[Serial] | ['"fmt"', '"os"', '"path/filepath"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Longduration-NonPreRelease-Author:tbuskey-High-42167-Must-gather collects sandboxed operator logs[Serial]", func() {
g.Skip("mustgather test must be done manually")
type counts struct {
audits int
crio int
qemuLogs int
qemuVersion int
describeCsv int
describeKc int
describeServices int
describeSub int
describeVwebhook int
}
oc.SetupProject()
var (
deployConfigFile = ""
deployName = "mg-42167-" + getRandomString()
deploymentFile = getRandomString() + "dep-common.json"
podNs = oc.Namespace()
err error
fails = 0
kcLogLevel = "{\"spec\":{\"logLevel\":\"debug\"}}"
logFile string
mustgatherFiles = []string{""}
mustgatherName = "mustgather" + getRandomString()
mustgatherDir = "/tmp/" + mustgatherName
mustgatherLog = mustgatherName + ".log"
msg string
nodeControlCount int
nodeWorkerCount int
singleNode = false
isWorker = false
)
mustgatherChecks := counts{
audits: 0,
crio: 0,
qemuLogs: 0,
qemuVersion: 0,
describeCsv: 0,
describeKc: 0,
describeServices: 0,
describeSub: 0,
describeVwebhook: 0,
}
nodeControlList, err := exutil.GetClusterNodesBy(oc, "master")
msgIfErr := fmt.Sprintf("getClusterNodesBy master %v %v", nodeControlList, err)
o.Expect(err).NotTo(o.HaveOccurred(), msgIfErr)
nodeControlCount = len(nodeControlList)
nodeWorkerList, err := exutil.GetClusterNodesBy(oc, "worker")
msgIfErr = fmt.Sprintf("getClusterNodesBy worker %v %v", nodeWorkerList, err)
o.Expect(err).NotTo(o.HaveOccurred(), msgIfErr)
nodeWorkerCount = len(nodeWorkerList)
mustgatherExpected := counts{
audits: 1,
crio: nodeWorkerCount + nodeControlCount,
qemuLogs: nodeWorkerCount, // Need to change from deployment
qemuVersion: nodeWorkerCount,
describeCsv: 1,
describeKc: 1,
describeServices: 1,
describeSub: 1,
describeVwebhook: 1,
}
// for SNO
if nodeWorkerCount == 1 && !strings.Contains(nodeWorkerList[0], "worker") {
singleNode = true
mustgatherExpected.crio = nodeWorkerCount
}
// patch kataconfig for debug
_, _ = oc.AsAdmin().Run("patch").Args("kataconfig", kataconfig.name, "-n", subscription.namespace, "--type", "merge", "--patch", kcLogLevel).Output()
msg, err = waitForNodesInDebug(oc, subscription.namespace)
e2e.Logf("%v", msg)
/* Create a deployment file from template with N replicas where N=worker nodes
It does not ensure that there is a replica on each worker node.
Loop because on 4.12 SNO, nodes might not respond at 1st
error: unable to process template
service unavailable
exit status 1 */
errCheck := wait.Poll(10*time.Second, 360*time.Second, func() (bool, error) {
deployConfigFile, err = oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", defaultDeployment,
"-p", "NAME="+deployName, "-p", "NAMESPACE="+podNs, "-p", "REPLICAS="+fmt.Sprintf("%v", nodeWorkerCount),
"-p", "RUNTIMECLASSNAME="+kataconfig.runtimeClassName, "-p", "IMAGE="+testrun.workloadImage).OutputToFile(deploymentFile)
if strings.Contains(deployConfigFile, deploymentFile) {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Error: Unable to create deployment file from template: %v %v", deployConfigFile, err))
o.Expect(deployConfigFile).NotTo(o.BeEmpty(), "empty deploy file error %v", err)
_, err = oc.AsAdmin().Run("apply").Args("-f", deployConfigFile, "-n", podNs).Output()
defer oc.AsAdmin().Run("delete").Args("deploy", "-n", podNs, deployName, "--ignore-not-found").Execute()
msg, err = waitForDeployment(oc, podNs, deployName)
msgIfErr = fmt.Sprintf("ERROR: waitForDeployment %v: %v %v", deployName, msg, err)
o.Expect(err).NotTo(o.HaveOccurred(), msgIfErr)
o.Expect(msg).NotTo(o.BeEmpty(), msgIfErr)
defer os.RemoveAll(mustgatherDir)
logFile, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("-n", subscription.namespace, "must-gather", "--image="+testrun.mustgatherImage, "--dest-dir="+mustgatherDir).OutputToFile(mustgatherLog)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("ERROR: mustgather %v has an error %v %v", mustgatherLog, logFile, err))
files, err := os.ReadDir(mustgatherDir)
msgIfErr = fmt.Sprintf("ERROR %v contents %v\n%v", mustgatherDir, err, files)
o.Expect(err).NotTo(o.HaveOccurred(), msgIfErr)
o.Expect(files).NotTo(o.BeEmpty(), msgIfErr)
err = filepath.Walk(mustgatherDir, func(path string, info os.FileInfo, err error) error {
if err != nil {
e2e.Logf("Error on %v: %v", path, err)
return err
}
isWorker = false
for _, worker := range nodeWorkerList {
if strings.Contains(path, worker) {
isWorker = true
break
}
}
if info.IsDir() { // qemu will create a directory but might not create files
if isWorker == true && strings.Contains(path, "/run/vc/crio/fifo") && !strings.Contains(path, "/run/vc/crio/fifo/io") {
mustgatherChecks.qemuLogs++
}
}
if !info.IsDir() {
mustgatherFiles = append(mustgatherFiles, path)
if strings.Contains(path, "audit.log") {
mustgatherChecks.audits++
}
if strings.Contains(path, "/nodes/") {
if strings.Contains(path, "_logs_crio") {
mustgatherChecks.crio++
}
// in SNO, no worker, just master
if (isWorker == true || (singleNode == true && isWorker != true)) && strings.Contains(path, "/version") {
mustgatherChecks.qemuVersion++
}
}
if strings.Contains(path, "/sandboxed-containers") {
if strings.Contains(path, "/clusterserviceversion_description") {
mustgatherChecks.describeCsv++
}
if strings.Contains(path, "/kataconfig_description") {
mustgatherChecks.describeKc++
}
if strings.Contains(path, "/services_description") {
mustgatherChecks.describeServices++
}
if strings.Contains(path, "/subscription_description") {
mustgatherChecks.describeSub++
}
if strings.Contains(path, "/validatingwebhookconfigurations_description") {
mustgatherChecks.describeVwebhook++
}
}
}
return nil
})
e2e.Logf("%v files in must-gather dir %v", len(mustgatherFiles), mustgatherDir)
e2e.Logf("expected: %v", mustgatherExpected)
e2e.Logf("actual : %v", mustgatherChecks)
e2e.Logf("mustgatherChecks.audits : %v", mustgatherChecks.audits)
if mustgatherChecks.audits < mustgatherExpected.audits {
e2e.Logf("Audit logs (%v) not found on any worker nodes (%v)", mustgatherChecks.audits, mustgatherExpected.audits)
fails++
}
e2e.Logf("mustgatherChecks.crio : %v", mustgatherChecks.crio)
if mustgatherChecks.crio != (mustgatherExpected.crio) {
e2e.Logf("crio logs (%v) did exist on all nodes (%v)", mustgatherChecks.crio, (mustgatherExpected.crio))
fails++
}
// A deployment will place VMs based on loads
// to ensure a VM is on each node another method is needed
e2e.Logf("mustgatherChecks.qemuLogs : %v", mustgatherChecks.qemuLogs)
if mustgatherChecks.qemuLogs != mustgatherExpected.qemuLogs {
e2e.Logf("qemu log directory (%v) does not exist on all worker nodes (%v), is ok", mustgatherChecks.qemuLogs, mustgatherExpected.qemuLogs)
// VMs should be 1 on each worker node but k8s might put 2 on a node & 0 on another per node load
if !singleNode && mustgatherChecks.qemuLogs < 1 { // because deployment is used
fails++
}
}
e2e.Logf("mustgatherChecks.qemuVersion : %v", mustgatherChecks.qemuVersion)
if mustgatherChecks.qemuVersion != mustgatherExpected.qemuVersion {
e2e.Logf("rpm version log (%v) did not exist on worker nodes (%v)", mustgatherChecks.qemuVersion, mustgatherExpected.qemuVersion)
fails++
}
e2e.Logf("mustgatherChecks.describeCsv : %v", mustgatherChecks.describeCsv)
if mustgatherChecks.describeCsv != mustgatherExpected.describeCsv {
e2e.Logf("describeCsv (%v) did not exist", mustgatherChecks.describeCsv)
fails++
}
e2e.Logf("mustgatherChecks.describeKc : %v", mustgatherChecks.describeKc)
if mustgatherChecks.describeKc != mustgatherExpected.describeKc {
e2e.Logf("describeKc (%v) did not exist", mustgatherChecks.describeKc)
fails++
}
e2e.Logf("mustgatherChecks.describeServices : %v", mustgatherChecks.describeServices)
if mustgatherChecks.describeServices != mustgatherExpected.describeServices {
e2e.Logf("describeServices (%v) did not exist", mustgatherChecks.describeServices)
fails++
}
e2e.Logf("mustgatherChecks.describeSub : %v", mustgatherChecks.describeSub)
if mustgatherChecks.describeSub != mustgatherExpected.describeSub {
e2e.Logf("describeSub (%v) did not exist", mustgatherChecks.describeSub)
fails++
}
e2e.Logf("mustgatherChecks.describeVwebhook : %v", mustgatherChecks.describeVwebhook)
if mustgatherChecks.describeVwebhook != mustgatherExpected.describeVwebhook {
e2e.Logf("describeVwebhook (%v) did not exist", mustgatherChecks.describeVwebhook)
fails++
}
o.Expect(fails).To(o.Equal(0), fmt.Sprintf("%v logs did not match expected results\n%v", fails, mustgatherExpected))
e2e.Logf("STEP: SUCCESS")
}) |
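The filepath.Walk callback above tallies files by path substring. The same counting pattern, extracted into a standalone sketch with illustrative markers (not the exact paths the test checks):
package main
import (
	"fmt"
	"io/fs"
	"path/filepath"
	"strings"
)
// countByMarker walks root and counts regular files whose paths contain each marker.
func countByMarker(root string, markers []string) (map[string]int, error) {
	counts := map[string]int{}
	err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() {
			return nil
		}
		for _, m := range markers {
			if strings.Contains(path, m) {
				counts[m]++
			}
		}
		return nil
	})
	return counts, err
}
func main() {
	// illustrative markers; the test above checks a larger, specific set
	counts, err := countByMarker("/tmp/mustgather-example", []string{"audit.log", "_logs_crio", "kataconfig_description"})
	fmt.Println(counts, err)
}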