element_type | project_name | uuid | name | imports | structs | interfaces | file_location | code | global_vars | package | tags
---|---|---|---|---|---|---|---|---|---|---|---|
test case | openshift/openshift-tests-private | bcd86890-3855-479c-b8e5-1eee165ac982 | Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-55724-Control plane machine set OnDelete update strategies - Delete/Add a failureDomain [Disruptive] | ['"strconv"', '"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/control_plane_machineset.go | g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-55724-Control plane machine set OnDelete update strategies - Delete/Add a failureDomain [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP)
skipForCPMSNotStable(oc)
skipForClusterNotStable(oc)
g.By("Check failureDomains")
availabilityZones := getCPMSAvailabilityZones(oc, iaasPlatform)
if len(availabilityZones) <= 1 {
g.Skip("Skip for the failureDomains is no more than 1")
}
g.By("Update strategy to OnDelete")
key, value, machineName := getZoneAndMachineFromCPMSZones(oc, availabilityZones)
getMachineAvailabilityZoneJSON := getAvailabilityZoneJSONByCloud[iaasPlatform][getMachineAvailabilityZoneJSONCon]
getCPMSAvailabilityZonesJSON := getAvailabilityZoneJSONByCloud[iaasPlatform][getCPMSAvailabilityZonesJSONCon]
deleteFailureDomain, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.spec.template.machines_v1beta1_machine_openshift_io.failureDomains."+iaasPlatform.String()+"["+strconv.Itoa(key)+"]}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
defer printNodeInfo(oc)
defer waitMasterNodeReady(oc)
defer waitForClusterStable(oc)
defer func() {
availabilityZonesStr, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", getCPMSAvailabilityZonesJSON, "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if !strings.Contains(availabilityZonesStr, value) {
oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `[{"op":"add","path":"/spec/template/machines_v1beta1_machine_openshift_io/failureDomains/`+iaasPlatform.String()+`/0","value":`+deleteFailureDomain+`}]`, "--type=json", "-n", machineAPINamespace).Execute()
waitForCPMSUpdateCompleted(oc, 1)
}
}()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"strategy":{"type":"RollingUpdate"}}}`, "--type=merge", "-n", machineAPINamespace).Execute()
defer waitForClusterStable(oc)
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"strategy":{"type":"OnDelete"}}}`, "--type=merge", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Pick the failureDomain which has only one master machine and delete the failureDomain")
suffix := getMachineSuffix(oc, machineName)
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `[{"op":"remove","path":"/spec/template/machines_v1beta1_machine_openshift_io/failureDomains/`+iaasPlatform.String()+`/`+strconv.Itoa(key)+`"}]`, "--type=json", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Delete the master machine in the selected failureDomain")
clusterinfra.DeleteMachine(oc, machineName)
g.By("Check new master will be created in other zones and old master will be deleted")
labelsBefore := "machine.openshift.io/zone=" + value + ",machine.openshift.io/cluster-api-machine-type=master"
labelsAfter := "machine.openshift.io/zone!=" + value + ",machine.openshift.io/cluster-api-machine-type=master"
newMachineNameRolledWithFailureDomain := clusterinfra.WaitForMachineRunningBySuffix(oc, suffix, labelsAfter)
clusterinfra.WaitForMachineDisappearBySuffix(oc, suffix, labelsBefore)
waitForClusterStable(oc)
g.By("Check if it will rebalance the machines")
availabilityZones = getCPMSAvailabilityZones(oc, iaasPlatform)
if len(availabilityZones) >= 3 {
e2e.Logf("availabilityZones>=3 means the three master machines are in different zones now, it will not rebalance when adding new zone")
oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `[{"op":"add","path":"/spec/template/machines_v1beta1_machine_openshift_io/failureDomains/`+iaasPlatform.String()+`/0","value":`+deleteFailureDomain+`}]`, "--type=json", "-n", machineAPINamespace).Execute()
o.Expect(checkIfCPMSCoIsStable(oc)).To(o.BeTrue())
} else {
g.By("Add the failureDomain back to check OnDelete strategy rebalance the machines")
availabilityZone, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, newMachineNameRolledWithFailureDomain, "-n", "openshift-machine-api", getMachineAvailabilityZoneJSON).Output()
o.Expect(err).NotTo(o.HaveOccurred())
labelsAfter = "machine.openshift.io/zone=" + availabilityZone + ",machine.openshift.io/cluster-api-machine-type=master"
oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `[{"op":"add","path":"/spec/template/machines_v1beta1_machine_openshift_io/failureDomains/`+iaasPlatform.String()+`/0","value":`+deleteFailureDomain+`}]`, "--type=json", "-n", machineAPINamespace).Execute()
g.By("Delete the new created master machine ")
clusterinfra.DeleteMachine(oc, newMachineNameRolledWithFailureDomain)
g.By("Check new master will be created in new added zone and old master will be deleted")
newMachineNameRolledBalancedFailureDomain := clusterinfra.WaitForMachinesRunningByLabel(oc, 1, labelsBefore)[0]
e2e.Logf("updatedMachineName:%s", newMachineNameRolledBalancedFailureDomain)
suffix = getMachineSuffix(oc, newMachineNameRolledBalancedFailureDomain)
clusterinfra.WaitForMachineDisappearBySuffix(oc, suffix, labelsAfter)
waitForClusterStable(oc)
}
o.Expect(checkIfCPMSIsStable(oc)).To(o.BeTrue())
}) | |||||
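The failureDomain removal and re-addition above are plain JSON-patch operations against the CPMS spec. A minimal sketch of how those payloads could be factored into helpers (the helper names are hypothetical; the test builds the strings inline):

```go
package clusterinfrastructure

import "fmt"

// Hypothetical helpers mirroring the inline JSON-patch payloads used in the test above.

// removeFailureDomainPatch builds a JSON-patch "remove" for the failureDomain
// at the given index in the CPMS template for the given platform key.
func removeFailureDomainPatch(platform string, index int) string {
	return fmt.Sprintf(`[{"op":"remove","path":"/spec/template/machines_v1beta1_machine_openshift_io/failureDomains/%s/%d"}]`, platform, index)
}

// addFailureDomainPatch builds a JSON-patch "add" that re-inserts a previously
// saved failureDomain JSON object at index 0.
func addFailureDomainPatch(platform, failureDomain string) string {
	return fmt.Sprintf(`[{"op":"add","path":"/spec/template/machines_v1beta1_machine_openshift_io/failureDomains/%s/0","value":%s}]`, platform, failureDomain)
}
```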
test case | openshift/openshift-tests-private | 2a7ff5ba-9e07-44f8-81bb-c25ebb4f3807 | Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-55725-Control plane machine set OnDelete update strategies - Delete a master machine [Disruptive] | ['"math/rand"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/control_plane_machineset.go | g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-55725-Control plane machine set OnDelete update strategies - Delete a master machine [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP, clusterinfra.Nutanix, clusterinfra.VSphere)
skipForCPMSNotStable(oc)
skipForClusterNotStable(oc)
g.By("Update strategy to OnDelete")
defer printNodeInfo(oc)
defer waitMasterNodeReady(oc)
defer waitForClusterStable(oc)
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"strategy":{"type":"RollingUpdate"}}}`, "--type=merge", "-n", machineAPINamespace).Execute()
defer waitForClusterStable(oc)
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"strategy":{"type":"OnDelete"}}}`, "--type=merge", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Random pick a master machine and delete manually to trigger OnDelete update")
toDeletedMachineName := clusterinfra.ListMasterMachineNames(oc)[rand.Int31n(int32(len(clusterinfra.ListMasterMachineNames(oc))))]
var getMachineAvailabilityZoneJSON string
labels := "machine.openshift.io/cluster-api-machine-type=master"
if iaasPlatform == clusterinfra.AWS || iaasPlatform == clusterinfra.Azure || iaasPlatform == clusterinfra.GCP {
getMachineAvailabilityZoneJSON = getAvailabilityZoneJSONByCloud[iaasPlatform][getMachineAvailabilityZoneJSONCon]
availabilityZone, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, toDeletedMachineName, "-n", "openshift-machine-api", getMachineAvailabilityZoneJSON).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if availabilityZone != "" {
labels = "machine.openshift.io/zone=" + availabilityZone + ",machine.openshift.io/cluster-api-machine-type=master"
}
}
clusterinfra.DeleteMachine(oc, toDeletedMachineName)
g.By("Check new master will be created and old master will be deleted")
suffix := getMachineSuffix(oc, toDeletedMachineName)
clusterinfra.WaitForMachineRunningBySuffix(oc, suffix, labels)
clusterinfra.WaitForMachineDisappearByName(oc, toDeletedMachineName)
waitForClusterStable(oc)
o.Expect(checkIfCPMSIsStable(oc)).To(o.BeTrue())
}) | |||||
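Both OnDelete cases bracket the test with merge patches that flip the CPMS update strategy. A minimal sketch of that step as a helper, assuming the exutil.CLI wrapper, the fmt import, and the package-level machineAPINamespace constant used throughout (the function name is hypothetical; the tests issue the patch inline):

```go
// setCPMSStrategy is a hypothetical wrapper around the inline merge patch used
// above to switch the CPMS strategy between "OnDelete" and "RollingUpdate".
// It assumes the exutil.CLI helper and the machineAPINamespace constant from this package.
func setCPMSStrategy(oc *exutil.CLI, strategy string) error {
	patch := fmt.Sprintf(`{"spec":{"strategy":{"type":"%s"}}}`, strategy)
	return oc.AsAdmin().WithoutNamespace().Run("patch").
		Args("controlplanemachineset/cluster", "-p", patch, "--type=merge", "-n", machineAPINamespace).
		Execute()
}
```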
test case | openshift/openshift-tests-private | d57a659d-fbde-4747-94f8-87ab6f966fad | Author:zhsun-NonHyperShiftHOST-Medium-53328-It doesn't rearrange the availability zones if the order of the zones isn't matching in the CPMS and the Control Plane [Disruptive] | ['"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/control_plane_machineset.go | g.It("Author:zhsun-NonHyperShiftHOST-Medium-53328-It doesn't rearrange the availability zones if the order of the zones isn't matching in the CPMS and the Control Plane [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP)
skipForCPMSNotStable(oc)
g.By("Check failureDomains")
availabilityZones := getCPMSAvailabilityZones(oc, iaasPlatform)
if len(availabilityZones) <= 1 {
g.Skip("Skip for the failureDomains is no more than 1")
}
g.By("Update strategy to OnDelete so that it will not trigger update automaticly")
defer printNodeInfo(oc)
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"strategy":{"type":"RollingUpdate"}}}`, "--type=merge", "-n", machineAPINamespace).Execute()
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"strategy":{"type":"OnDelete"}}}`, "--type=merge", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Change the failureDomain's order by deleting/adding failureDomain")
changeFailureDomain, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.spec.template.machines_v1beta1_machine_openshift_io.failureDomains."+iaasPlatform.String()+"[1]}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `[{"op":"remove","path":"/spec/template/machines_v1beta1_machine_openshift_io/failureDomains/`+iaasPlatform.String()+`/1"}]`, "--type=json", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `[{"op":"add","path":"/spec/template/machines_v1beta1_machine_openshift_io/failureDomains/`+iaasPlatform.String()+`/0","value":`+changeFailureDomain+`}]`, "--type=json", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Update strategy to RollingUpdate check if will rearrange the availability zones and no update for masters")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"strategy":{"type":"RollingUpdate"}}}`, "--type=merge", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
newAvailabilityZones := getCPMSAvailabilityZones(oc, iaasPlatform)
o.Expect(strings.Join(newAvailabilityZones, "")).To(o.ContainSubstring(availabilityZones[1] + availabilityZones[0] + strings.Join(availabilityZones[2:], "")))
o.Expect(checkIfCPMSIsStable(oc)).To(o.BeTrue())
}) | |||||
test case | openshift/openshift-tests-private | e923fea0-7230-45e4-a04d-b15afbd1b536 | Author:zhsun-NonHyperShiftHOST-Medium-54895-CPMS generator controller will create a new CPMS if a CPMS is removed from cluster [Disruptive] | ['"fmt"', '"strconv"', '"time"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/control_plane_machineset.go | g.It("Author:zhsun-NonHyperShiftHOST-Medium-54895-CPMS generator controller will create a new CPMS if a CPMS is removed from cluster [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP, clusterinfra.Nutanix, clusterinfra.VSphere)
skipForCPMSNotStable(oc)
g.By("Delete controlplanemachineset")
defer printNodeInfo(oc)
defer activeControlPlaneMachineSet(oc)
deleteControlPlaneMachineSet(oc)
g.By("Check a new controlplanemachineset will be created and state is Inactive ")
err := wait.Poll(2*time.Second, 30*time.Second, func() (bool, error) {
cpmsState, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-n", machineAPINamespace, "-o=jsonpath={.spec.state}").Output()
if cpmsState != "Inactive" {
e2e.Logf("controlplanemachineset is not in Inactive state and waiting up to 2 seconds ...")
return false, nil
}
e2e.Logf("controlplanemachineset is in Inactive state")
return true, nil
})
exutil.AssertWaitPollNoErr(err, "controlplanemachineset is not in Inactive state")
g.By("Check controlplanemachineset do not reconcile master machines if state is Inactive")
var fieldName string
var fieldValue = "invalid"
switch iaasPlatform {
case clusterinfra.AWS:
fieldName = "instanceType"
case clusterinfra.Azure:
fieldName = "vmSize"
case clusterinfra.GCP:
fieldName = "machineType"
confidentialCompute, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.spec.template.machines_v1beta1_machine_openshift_io.spec.providerSpec.value.confidentialCompute}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if confidentialCompute == "Enabled" {
fieldValue = "c2d-standard-4"
}
case clusterinfra.Nutanix:
fieldName = "bootType"
fieldValue = "Legacy"
case clusterinfra.VSphere:
fieldName = "diskGiB"
fieldValue = strconv.Itoa(140)
default:
e2e.Logf("The " + iaasPlatform.String() + " Platform is not supported for now.")
}
if iaasPlatform == clusterinfra.VSphere {
// Construct JSON payload with the appropriate type handling for fieldValue
jsonPayload := fmt.Sprintf(`{"spec":{"template":{"machines_v1beta1_machine_openshift_io":{"spec":{"providerSpec":{"value":{"%s":%v}}}}}}}`, fieldName, fieldValue)
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", jsonPayload, "--type=merge", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
} else {
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"template":{"machines_v1beta1_machine_openshift_io":{"spec":{"providerSpec":{"value":{"`+fieldName+`":"`+fieldValue+`"}}}}}}}`, "--type=merge", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
activeControlPlaneMachineSet(oc)
o.Expect(checkIfCPMSIsStable(oc)).To(o.BeTrue())
}) | |||||
test case | openshift/openshift-tests-private | bf811547-1e7f-4d66-914f-02a7ff79573a | Author:zhsun-NonHyperShiftHOST-Medium-52587-Webhook validations for CPMS resource [Disruptive] | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/control_plane_machineset.go | g.It("Author:zhsun-NonHyperShiftHOST-Medium-52587-Webhook validations for CPMS resource [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP, clusterinfra.Nutanix, clusterinfra.VSphere)
g.By("Update CPMS name")
cpmsName, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"metadata":{"name":"invalid"}}`, "--type=merge", "-n", machineAPINamespace).Output()
o.Expect(cpmsName).To(o.ContainSubstring("the name of the object (invalid) does not match the name on the URL (cluster)"))
g.By("Update CPMS replicas")
cpmsReplicas, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"replicas": 4}}`, "--type=merge", "-n", machineAPINamespace).Output()
o.Expect(cpmsReplicas).To(o.ContainSubstring("Unsupported value"))
g.By("Update CPMS selector")
cpmsSelector, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"selector":{"matchLabels":{"machine.openshift.io/cluster-api-cluster": null}}}}`, "--type=merge", "-n", machineAPINamespace).Output()
o.Expect(cpmsSelector).To(o.ContainSubstring("selector is immutable"))
g.By("Update CPMS labels")
cpmsLabel, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"template":{"machines_v1beta1_machine_openshift_io":{"metadata":{"labels":{"machine.openshift.io/cluster-api-cluster": null, "machine.openshift.io/cluster-api-machine-role": "invalid", "machine.openshift.io/cluster-api-machine-type": "invalid"}}}}}}`, "--type=merge", "-n", machineAPINamespace).Output()
o.Expect(cpmsLabel).To(o.ContainSubstring("label 'machine.openshift.io/cluster-api-cluster' is required"))
o.Expect(cpmsLabel).To(o.ContainSubstring("label 'machine.openshift.io/cluster-api-machine-role' is required, and must have value 'master'"))
o.Expect(cpmsLabel).To(o.ContainSubstring("label 'machine.openshift.io/cluster-api-machine-type' is required, and must have value 'master'"))
g.By("Update CPMS state")
cpmsState, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"state":"Inactive"}}`, "--type=merge", "-n", machineAPINamespace).Output()
o.Expect(cpmsState).To(o.ContainSubstring("state cannot be changed once Active"))
}) | |||||
test case | openshift/openshift-tests-private | c1e626fc-0295-466f-91ab-7ea765d5ba8f | Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-55485-Implement update logic for RollingUpdate CPMS strategy - Delete/Add a failureDomain [Disruptive] | ['"strconv"', '"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/control_plane_machineset.go | g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-55485-Implement update logic for RollingUpdate CPMS strategy - Delete/Add a failureDomain [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP)
skipForCPMSNotStable(oc)
skipForClusterNotStable(oc)
g.By("Check failureDomains")
availabilityZones := getCPMSAvailabilityZones(oc, iaasPlatform)
if len(availabilityZones) <= 1 {
g.Skip("Skip for the failureDomains is no more than 1")
}
g.By("Pick the failureDomain which has only one master machine")
availabilityZones = getCPMSAvailabilityZones(oc, iaasPlatform)
key, value, machineName := getZoneAndMachineFromCPMSZones(oc, availabilityZones)
suffix := getMachineSuffix(oc, machineName)
getMachineAvailabilityZoneJSON := getAvailabilityZoneJSONByCloud[iaasPlatform][getMachineAvailabilityZoneJSONCon]
getCPMSAvailabilityZonesJSON := getAvailabilityZoneJSONByCloud[iaasPlatform][getCPMSAvailabilityZonesJSONCon]
deleteFailureDomain, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.spec.template.machines_v1beta1_machine_openshift_io.failureDomains."+iaasPlatform.String()+"["+strconv.Itoa(key)+"]}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Delete the failureDomain to trigger RollingUpdate")
labelsBefore := "machine.openshift.io/zone=" + value + ",machine.openshift.io/cluster-api-machine-type=master"
labelsAfter := "machine.openshift.io/zone!=" + value + ",machine.openshift.io/cluster-api-machine-type=master"
defer printNodeInfo(oc)
defer waitMasterNodeReady(oc)
defer waitForClusterStable(oc)
defer func() {
availabilityZonesStr, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", getCPMSAvailabilityZonesJSON, "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if !strings.Contains(availabilityZonesStr, value) {
oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `[{"op":"add","path":"/spec/template/machines_v1beta1_machine_openshift_io/failureDomains/`+iaasPlatform.String()+`/0","value":`+deleteFailureDomain+`}]`, "--type=json", "-n", machineAPINamespace).Execute()
waitForCPMSUpdateCompleted(oc, 1)
}
}()
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `[{"op":"remove","path":"/spec/template/machines_v1beta1_machine_openshift_io/failureDomains/`+iaasPlatform.String()+`/`+strconv.Itoa(key)+`"}]`, "--type=json", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
newMachineNameRolledWithFailureDomain := clusterinfra.WaitForMachineRunningBySuffix(oc, suffix, labelsAfter)
clusterinfra.WaitForMachineDisappearBySuffix(oc, suffix, labelsBefore)
waitForClusterStable(oc)
g.By("Check if it will rebalance the machines")
availabilityZones = getCPMSAvailabilityZones(oc, iaasPlatform)
if len(availabilityZones) >= 3 {
e2e.Logf("availabilityZones>=3 means the three master machines are in different zones now, it will not rebalance when adding new zone")
oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `[{"op":"add","path":"/spec/template/machines_v1beta1_machine_openshift_io/failureDomains/`+iaasPlatform.String()+`/0","value":`+deleteFailureDomain+`}]`, "--type=json", "-n", machineAPINamespace).Execute()
o.Expect(checkIfCPMSCoIsStable(oc)).To(o.BeTrue())
} else {
g.By("Add the failureDomain back to check RollingUpdate strategy rebalance the machines")
availabilityZone, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, newMachineNameRolledWithFailureDomain, "-n", "openshift-machine-api", getMachineAvailabilityZoneJSON).Output()
o.Expect(err).NotTo(o.HaveOccurred())
labelsAfter = "machine.openshift.io/zone=" + availabilityZone + ",machine.openshift.io/cluster-api-machine-type=master"
oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `[{"op":"add","path":"/spec/template/machines_v1beta1_machine_openshift_io/failureDomains/`+iaasPlatform.String()+`/0","value":`+deleteFailureDomain+`}]`, "--type=json", "-n", machineAPINamespace).Execute()
newMachineNameRolledBalancedFailureDomain := clusterinfra.WaitForMachinesRunningByLabel(oc, 1, labelsBefore)[0]
e2e.Logf("updatedMachineName:%s", newMachineNameRolledBalancedFailureDomain)
suffix = getMachineSuffix(oc, newMachineNameRolledBalancedFailureDomain)
clusterinfra.WaitForMachineDisappearBySuffix(oc, suffix, labelsAfter)
waitForClusterStable(oc)
}
o.Expect(checkIfCPMSIsStable(oc)).To(o.BeTrue())
}) | |||||
test case | openshift/openshift-tests-private | 689fc721-b0be-4d24-9da3-2fab53008eee | Author:zhsun-NonHyperShiftHOST-Medium-70442-A warning should be shown when removing the target pools from cpms [Disruptive] | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/control_plane_machineset.go | g.It("Author:zhsun-NonHyperShiftHOST-Medium-70442-A warning should be shown when removing the target pools from cpms [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.GCP)
publicZone, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("dns", "cluster", "-n", "openshift-dns", "-o=jsonpath={.spec.publicZone}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if publicZone == "" {
g.Skip("Because on private clusters we don't use target pools so skip this case for private clusters!!")
}
targetPool := "null"
g.By("Add targetpool")
defer printNodeInfo(oc)
defer waitMasterNodeReady(oc)
defer waitForClusterStable(oc)
patchWithTargetPool, err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `[{"op":"add","path":"/spec/template/machines_v1beta1_machine_openshift_io/spec/providerSpec/value/targetPools","value":`+targetPool+`}]`, "--type=json", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Remove targetpool")
patchWithoutTargetPool, err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `[{"op":"remove","path":"/spec/template/machines_v1beta1_machine_openshift_io/spec/providerSpec/value/targetPools"}]`, "--type=json", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(patchWithTargetPool).To(o.ContainSubstring("Warning: spec.template.machines_v1beta1_machine_openshift_io.spec.providerSpec.value.targetPools: TargetPools field is not set on ControlPlaneMachineSet"))
o.Expect(patchWithoutTargetPool).To(o.ContainSubstring("Warning: spec.template.machines_v1beta1_machine_openshift_io.spec.providerSpec.value.targetPools: TargetPools field is not set on ControlPlaneMachineSet"))
}) | |||||
test case | openshift/openshift-tests-private | 8b6063e1-77c6-4695-ae95-b389bd2242b9 | Author:huliu-NonHyperShiftHOST-Medium-78773-[CPMS] Webhook validation for custom name formats to Control Plane Machines via CPMS [Disruptive] | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/control_plane_machineset.go | g.It("Author:huliu-NonHyperShiftHOST-Medium-78773-[CPMS] Webhook validation for custom name formats to Control Plane Machines via CPMS [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP, clusterinfra.Nutanix, clusterinfra.VSphere)
if !exutil.IsTechPreviewNoUpgrade(oc) {
g.Skip("featureSet: TechPreviewNoUpgrade is required for this test")
}
defer printNodeInfo(oc)
defer waitMasterNodeReady(oc)
defer waitForClusterStable(oc)
g.By("Patch invalid machine name prefix")
out, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"machineNamePrefix":"abcd_0"}}`, "--type=merge", "-n", machineAPINamespace).Output()
o.Expect(out).To(o.ContainSubstring(`Invalid value: "string": a lowercase RFC 1123 subdomain must consist of lowercase alphanumeric characters, hyphens ('-'), and periods ('.'). Each block, separated by periods, must start and end with an alphanumeric character. Hyphens are not allowed at the start or end of a block, and consecutive periods are not permitted.`))
}) | |||||
test | openshift/openshift-tests-private | 6a6071b5-2b4c-4973-b8ec-11880cf9a7cd | machines | import (
"context"
"fmt"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/openshift/openshift-tests-private/test/extended/util/architecture"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | package clusterinfrastructure
import (
"context"
"fmt"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/openshift/openshift-tests-private/test/extended/util/architecture"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-cluster-lifecycle] Cluster_Infrastructure MAPI", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("machine-api-operator", exutil.KubeConfigPath())
infrastructureName string
)
g.BeforeEach(func() {
exutil.SkipForSNOCluster(oc)
infrastructureName = clusterinfra.GetInfrastructureName(oc)
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Medium-45772-MachineSet selector is immutable", func() {
g.By("Create a new machineset")
clusterinfra.SkipConditionally(oc)
machinesetName := infrastructureName + "-45772"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Update machineset with empty clusterID")
out, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"selector":{"matchLabels":{"machine.openshift.io/cluster-api-cluster": null}}}}`, "--type=merge").Output()
o.Expect(out).To(o.ContainSubstring("selector is immutable"))
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-45377-Enable accelerated network via MachineSets on azure [Disruptive]", func() {
g.By("Create a new machineset with acceleratedNetworking: true")
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure)
machinesetName := infrastructureName + "-45377"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
arch, err := clusterinfra.GetArchitectureFromMachineSet(oc, machinesetName)
o.Expect(err).NotTo(o.HaveOccurred())
var vmSize string
switch arch {
case architecture.AMD64:
vmSize = "Standard_D2s_v3"
case architecture.ARM64:
vmSize = "Standard_D8ps_v5"
default:
g.Skip("This case doesn't support other architectures than arm64, amd64")
}
g.By("Update machineset with acceleratedNetworking: true")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n",
machineAPINamespace, "-p",
fmt.Sprintf(`{"spec":{"replicas":1,"template":{"spec":{"providerSpec":`+
`{"value":{"acceleratedNetworking":true,"vmSize":"%s"}}}}}}`, vmSize),
"--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// When acceleratedNetworking is set to true, the machine needs nearly 9 minutes to reach Running, so the method timeout is changed to 10 minutes.
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check machine with acceleratedNetworking: true")
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.acceleratedNetworking}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("out:%s", out)
o.Expect(out).To(o.ContainSubstring("true"))
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-46967-Implement Ephemeral OS Disks - OS cache placement on azure [Disruptive]", func() {
g.By("Create a new machineset with Ephemeral OS Disks - OS cache placement")
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure)
skipTestIfSpotWorkers(oc)
machinesetName := infrastructureName + "-46967"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
arch, err := clusterinfra.GetArchitectureFromMachineSet(oc, machinesetName)
o.Expect(err).NotTo(o.HaveOccurred())
var vmSize string
switch arch {
case architecture.AMD64:
vmSize = "Standard_D2s_v3"
case architecture.ARM64:
vmSize = "Standard_D2plds_v5"
default:
g.Skip("This case doesn't support other architectures than arm64, amd64")
}
g.By("Update machineset with Ephemeral OS Disks - OS cache placement")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n",
machineAPINamespace, "-p",
fmt.Sprintf(`{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"vmSize":"%s",`+
`"osDisk":{"diskSizeGB":30,"cachingType":"ReadOnly","diskSettings":{"ephemeralStorageLocation":"Local"},`+
`"managedDisk":{"storageAccountType":""}}}}}}}}`, vmSize), "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check machine with Ephemeral OS Disks - OS cache placement")
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.osDisk.diskSettings.ephemeralStorageLocation}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("out:%s", out)
o.Expect(out).To(o.ContainSubstring("Local"))
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-46303-Availability sets could be created when needed for azure [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure)
defaultWorkerMachinesetName := clusterinfra.GetRandomMachineSetName(oc)
region, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, defaultWorkerMachinesetName, "-n", "openshift-machine-api", "-o=jsonpath={.spec.template.spec.providerSpec.value.location}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if region != "northcentralus" && region != "westus" {
/*
This case only supports on a region which doesn't have zones.
These two regions cover most of the templates in flexy-templates and they don't have zones,
so restricting the test is only applicable in these two regions.
*/
g.Skip("Skip this test scenario because the test is only applicable in \"northcentralus\" or \"westus\" region")
}
g.By("Create a new machineset")
machinesetName := infrastructureName + "-46303"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Update machineset with availabilitySet already created for the default worker machineset")
/*
If the availability set is not created for the default worker machineset,
machine status will be failed and error message shows "Availability Set cannot be found".
Therefore, if machine created successfully with the availability set,
then it can prove that the availability set has been created when the default worker machineset is created.
*/
availabilitySetName := defaultWorkerMachinesetName + "-as"
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"availabilitySet":"`+availabilitySetName+`"}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check machine with availabilitySet")
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.availabilitySet}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("availability set name is: %s", out)
o.Expect(out == availabilitySetName).To(o.BeTrue())
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-47177-Medium-47201-[MDH] Machine Deletion Hooks appropriately block lifecycle phases [Disruptive]", func() {
g.By("Create a new machineset with lifecycle hook")
clusterinfra.SkipConditionally(oc)
machinesetName := infrastructureName + "-47177-47201"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Update machineset with lifecycle hook")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"lifecycleHooks":{"preDrain":[{"name":"drain1","owner":"drain-controller1"}],"preTerminate":[{"name":"terminate2","owner":"terminate-controller2"}]}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Delete newly created machine by scaling " + machinesetName + " to 0")
err = oc.AsAdmin().WithoutNamespace().Run("scale").Args("--replicas=0", "-n", "openshift-machine-api", mapiMachineset, machinesetName).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Wait for machine to go into Deleting phase")
err = wait.Poll(2*time.Second, 30*time.Second, func() (bool, error) {
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].status.phase}").Output()
if output != "Deleting" {
e2e.Logf("machine is not in Deleting phase and waiting up to 2 seconds ...")
return false, nil
}
e2e.Logf("machine is in Deleting phase")
return true, nil
})
exutil.AssertWaitPollNoErr(err, "Check machine phase failed")
g.By("Check machine stuck in Deleting phase because of lifecycle hook")
outDrain, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].status.conditions[0]}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("outDrain:%s", outDrain)
o.Expect(strings.Contains(outDrain, "\"message\":\"Drain operation currently blocked by: [{Name:drain1 Owner:drain-controller1}]\"") && strings.Contains(outDrain, "\"reason\":\"HookPresent\"") && strings.Contains(outDrain, "\"status\":\"False\"") && strings.Contains(outDrain, "\"type\":\"Drainable\"")).To(o.BeTrue())
outTerminate, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].status.conditions[2]}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("outTerminate:%s", outTerminate)
o.Expect(strings.Contains(outTerminate, "\"message\":\"Terminate operation currently blocked by: [{Name:terminate2 Owner:terminate-controller2}]\"") && strings.Contains(outTerminate, "\"reason\":\"HookPresent\"") && strings.Contains(outTerminate, "\"status\":\"False\"") && strings.Contains(outTerminate, "\"type\":\"Terminable\"")).To(o.BeTrue())
g.By("Update machine without lifecycle hook")
machineName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachine, machineName, "-n", "openshift-machine-api", "-p", `[{"op": "remove", "path": "/spec/lifecycleHooks/preDrain"},{"op": "remove", "path": "/spec/lifecycleHooks/preTerminate"}]`, "--type=json").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-47230-[MDH] Negative lifecycle hook validation [Disruptive]", func() {
g.By("Create a new machineset")
clusterinfra.SkipConditionally(oc)
machinesetName := infrastructureName + "-47230"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 1}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
machineName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
checkItems := []struct {
patchstr string
errormsg string
}{
{
patchstr: `{"spec":{"lifecycleHooks":{"preTerminate":[{"name":"","owner":"drain-controller1"}]}}}`,
errormsg: "name in body should be at least 3 chars long",
},
{
patchstr: `{"spec":{"lifecycleHooks":{"preDrain":[{"name":"drain1","owner":""}]}}}`,
errormsg: "owner in body should be at least 3 chars long",
},
{
patchstr: `{"spec":{"lifecycleHooks":{"preDrain":[{"name":"drain1","owner":"drain-controller1"},{"name":"drain1","owner":"drain-controller2"}]}}}`,
errormsg: "Duplicate value: map[string]interface {}{\"name\":\"drain1\"}",
},
}
for i, checkItem := range checkItems {
g.By("Update machine with invalid lifecycle hook")
out, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachine, machineName, "-n", "openshift-machine-api", "-p", checkItem.patchstr, "--type=merge").Output()
e2e.Logf("out"+strconv.Itoa(i)+":%s", out)
o.Expect(strings.Contains(out, checkItem.errormsg)).To(o.BeTrue())
}
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-44977-Machine with GPU is supported on gcp [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.GCP)
architecture.SkipArchitectures(oc, architecture.ARM64)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-44977"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
arch, err := clusterinfra.GetArchitectureFromMachineSet(oc, machinesetName)
o.Expect(err).NotTo(o.HaveOccurred())
if arch != architecture.AMD64 {
g.Skip("The selected machine set's arch is not amd64, skip this case!")
}
//check supported zone for gpu
zone, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-o=jsonpath={.spec.template.spec.providerSpec.value.zone}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if !strings.Contains(zone, "us-central1-") {
g.Skip("not valid zone for GPU machines")
}
g.By("Update machineset with GPU")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{ "gpus": [ { "count": 1,"type": "nvidia-tesla-p100" }],"machineType":"n1-standard-1", "zone":"us-central1-c", "onHostMaintenance":"Terminate","restartPolicy":"Always"}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check machine with GPU")
gpuType, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.gpus[0].type}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
onHostMaintenance, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.onHostMaintenance}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
restartPolicy, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.restartPolicy}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("gpuType:%s, onHostMaintenance:%s, restartPolicy:%s", gpuType, onHostMaintenance, restartPolicy)
o.Expect(strings.Contains(gpuType, "nvidia-tesla-p100") && strings.Contains(onHostMaintenance, "Terminate") && strings.Contains(restartPolicy, "Always")).To(o.BeTrue())
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Medium-48363-Machine providerID should be consistent with node providerID", func() {
g.By("Check machine providerID and node providerID are consistent")
clusterinfra.SkipConditionally(oc)
machineList := clusterinfra.ListAllMachineNames(oc)
for _, machineName := range machineList {
nodeName := clusterinfra.GetNodeNameFromMachine(oc, machineName)
if nodeName == "" {
continue
}
machineProviderID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, machineName, "-o=jsonpath={.spec.providerID}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeProviderID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeName, "-o=jsonpath={.spec.providerID}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(machineProviderID).Should(o.Equal(nodeProviderID))
}
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-High-35513-Windows machine should successfully provision for aws [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
architecture.SkipNonAmd64SingleArch(oc)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-35513"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
region, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.aws.region}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
var amiID string
switch region {
case "us-east-1", "us-iso-east-1":
amiID = "ami-0e09e139aca053387"
case "us-east-2":
amiID = "ami-0f4f40c1e7ef56be6"
default:
e2e.Logf("Not support region for the case for now.")
g.Skip("Not support region for the case for now.")
}
g.By("Update machineset with windows ami")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"metadata":{"labels":{"machine.openshift.io/os-id": "Windows"}},"spec":{"providerSpec":{"value":{"ami":{"id":"`+amiID+`"}}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachineProvisioned(oc, machinesetName)
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-48012-Change AWS EBS GP3 IOPS in MachineSet should take affect on aws [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
clusterinfra.SkipForAwsOutpostCluster(oc)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-48012"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Update machineset with gp3 iops 5000")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"blockDevices":[{"ebs":{"volumeType":"gp3","iops":5000}}]}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check on aws instance with gp3 iops 5000")
instanceID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-o=jsonpath={.items[0].status.providerStatus.instanceId}", "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName).Output()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.GetAwsCredentialFromCluster(oc)
volumeInfo, err := clusterinfra.GetAwsVolumeInfoAttachedToInstanceID(instanceID)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("volumeInfo:%s", volumeInfo)
o.Expect(strings.Contains(volumeInfo, "\"Iops\":5000") && strings.Contains(volumeInfo, "\"VolumeType\":\"gp3\"")).To(o.BeTrue())
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-High-33040-Required configuration should be added to the ProviderSpec to enable spot instances - azure [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure)
randomMachinesetName := clusterinfra.GetRandomMachineSetName(oc)
region, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", "openshift-machine-api", "-o=jsonpath={.spec.template.spec.providerSpec.value.location}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if region == "northcentralus" || region == "westus" || region == "usgovtexas" {
g.Skip("Skip this test scenario because it is not supported on the " + region + " region, because this region doesn't have zones")
}
g.By("Create a spot instance on azure")
clusterinfra.SkipConditionally(oc)
machinesetName := infrastructureName + "-33040"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"spotVMOptions":{}}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check machine and node were labelled as an `interruptible-instance`")
machine, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", machineAPINamespace, "-l", "machine.openshift.io/interruptible-instance=").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(machine).NotTo(o.BeEmpty())
node, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-n", machineAPINamespace, "-l", "machine.openshift.io/interruptible-instance=").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(node).NotTo(o.BeEmpty())
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-48594-AWS EFA network interfaces should be supported via machine api [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
architecture.SkipNonAmd64SingleArch(oc)
clusterinfra.SkipForAwsOutpostCluster(oc)
region, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.aws.region}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if region != "us-east-2" && region != "us-east-1" {
g.Skip("Not support region " + region + " for the case for now.")
}
g.By("Create a new machineset")
machinesetName := infrastructureName + "-48594"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Update machineset with networkInterfaceType: EFA")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"networkInterfaceType":"EFA","instanceType":"m5dn.24xlarge"}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check machine with networkInterfaceType: EFA")
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.networkInterfaceType}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("out:%s", out)
o.Expect(out).Should(o.Equal("EFA"))
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-48595-Negative validation for AWS NetworkInterfaceType [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
architecture.SkipNonAmd64SingleArch(oc)
region, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.aws.region}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if region != "us-east-2" && region != "us-east-1" {
g.Skip("Not support region " + region + " for the case for now.")
}
g.By("Create a new machineset")
machinesetName := infrastructureName + "-48595"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Update machineset with networkInterfaceType: invalid")
out, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"networkInterfaceType":"invalid","instanceType":"m5dn.24xlarge"}}}}}}`, "--type=merge").Output()
o.Expect(strings.Contains(out, "Invalid value")).To(o.BeTrue())
g.By("Update machineset with not supported instance types")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"networkInterfaceType":"EFA","instanceType":"m6i.xlarge"}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachineFailed(oc, machinesetName)
out, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].status.errorMessage}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("out:%s", out)
o.Expect(strings.Contains(out, "not supported")).To(o.BeTrue())
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-49827-Ensure pd-balanced disk is supported on GCP via machine api [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.GCP)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-49827"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Update machineset with invalid disk type")
out, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `[{"op":"replace","path":"/spec/template/spec/providerSpec/value/disks/0/type","value":"invalid"}]`, "--type=json").Output()
o.Expect(strings.Contains(out, "Unsupported value")).To(o.BeTrue())
g.By("Update machineset with pd-balanced disk type")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `[{"op":"replace","path":"/spec/replicas","value": 1},{"op":"replace","path":"/spec/template/spec/providerSpec/value/disks/0/type","value":"pd-balanced"}]`, "--type=json").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check machine with pd-balanced disk type")
out, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.disks[0].type}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("out:%s", out)
o.Expect(out).Should(o.Equal("pd-balanced"))
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-50731-Enable IMDSv2 on existing worker machines via machine set [Disruptive][Slow]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-50731"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Update machineset with imds required")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"metadataServiceOptions":{"authentication":"Required"}}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", machineAPINamespace, "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.metadataServiceOptions.authentication}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("out:%s", out)
o.Expect(out).Should(o.ContainSubstring("Required"))
g.By("Update machineset with imds optional")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"metadataServiceOptions":{"authentication":"Optional"}}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
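// providerSpec changes on a machineset only apply to newly created machines, so delete the existing machine and let the machineset replace it with IMDS set to Optional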
machineName := clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)[0]
oc.AsAdmin().WithoutNamespace().Run("delete").Args(mapiMachine, machineName, "-n", machineAPINamespace).Execute()
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
out, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", machineAPINamespace, "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[*].spec.providerSpec.value.metadataServiceOptions.authentication}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).Should(o.ContainSubstring("Optional"))
g.By("Update machine with invalid authentication ")
out, _ = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"metadataServiceOptions":{"authentication":"invalid"}}}}}}}`, "--type=merge").Output()
o.Expect(strings.Contains(out, "Invalid value: \"invalid\": Allowed values are either 'Optional' or 'Required'")).To(o.BeTrue())
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-37915-Creating machines using KMS keys from AWS [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
clusterinfra.SkipForAwsOutpostCluster(oc)
region, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.aws.region}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
kmsClient := exutil.NewKMSClient(region)
key, err := kmsClient.CreateKey(infrastructureName + " key 37915")
if err != nil {
g.Skip("Create key failed, skip the cases!!")
}
defer func() {
err := kmsClient.DeleteKey(key)
o.Expect(err).NotTo(o.HaveOccurred())
}()
g.By("Create a new machineset")
machinesetName := infrastructureName + "-37915"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
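// encrypt the root EBS volume with the newly created customer-managed KMS key by referencing its ARN in blockDevices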
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"blockDevices": [{"ebs":{"encrypted":true,"iops":0,"kmsKey":{"arn":"`+key+`"},"volumeSize":120,"volumeType":"gp2"}}]}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check machine with KMS keys")
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.blockDevices[0].ebs.kmsKey.arn}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).Should(o.ContainSubstring("arn:aws:kms"))
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-52471-Enable configuration of boot diagnostics when creating VMs on azure [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure)
g.By("Create a machineset configuring boot diagnostics with Azure managed storage accounts")
machinesetName := infrastructureName + "-52471-1"
ms1 := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms1.DeleteMachineSet(oc)
ms1.CreateMachineSet(oc)
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"diagnostics":{"boot":{"storageAccountType":"AzureManaged"}}}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", machineAPINamespace, "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.diagnostics.boot.storageAccountType}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).To(o.ContainSubstring("AzureManaged"))
g.By("Create machineset configuring boot diagnostics with Customer managed storage accounts")
machinesetName = infrastructureName + "-52471-2"
ms2 := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms2.DeleteMachineSet(oc)
ms2.CreateMachineSet(oc)
storageAccount, _, err1 := exutil.GetAzureStorageAccountFromCluster(oc)
o.Expect(err1).NotTo(o.HaveOccurred())
cloudName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.azure.cloudName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
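// the blob endpoint suffix differs on Azure US Government cloud, so pick the suffix based on the cloud name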
storageAccountURISuffix := ".blob.core.windows.net/"
if strings.ToLower(cloudName) == "azureusgovernmentcloud" {
storageAccountURISuffix = ".blob.core.usgovcloudapi.net/"
}
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"diagnostics":{"boot":{"storageAccountType":"CustomerManaged","customerManaged":{"storageAccountURI":"https://`+storageAccount+storageAccountURISuffix+`"}}}}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
out, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", machineAPINamespace, "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.diagnostics.boot.storageAccountType}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).To(o.ContainSubstring("CustomerManaged"))
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Medium-52473-Webhook validations for azure boot diagnostics [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure)
g.By("Create a machineset")
machinesetName := infrastructureName + "-52473-1"
ms1 := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms1.DeleteMachineSet(oc)
ms1.CreateMachineSet(oc)
g.By("Update machineset with invalid storage account type")
out, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"diagnostics":{"boot":{"storageAccountType":"AzureManaged-invalid"}}}}}}}}`, "--type=merge").Output()
o.Expect(out).To(o.ContainSubstring("storageAccountType must be one of: AzureManaged, CustomerManaged"))
g.By("Update machineset with Customer Managed boot diagnostics, with a missing storage account URI")
out, _ = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"diagnostics":{"boot":{"storageAccountType":"CustomerManaged"}}}}}}}}`, "--type=merge").Output()
o.Expect(out).To(o.ContainSubstring("customerManaged configuration must be provided"))
g.By("Update machineset Azure managed storage accounts")
out, _ = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"diagnostics":{"boot":{"storageAccountType":"AzureManaged","customerManaged":{"storageAccountURI":"https://clusterqa2ob.blob.core.windows.net"}}}}}}}}}`, "--type=merge").Output()
o.Expect(out).To(o.ContainSubstring("customerManaged may not be set when type is AzureManaged"))
g.By("Update machineset with invalid storageAccountURI")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"diagnostics":{"boot":{"storageAccountType":"CustomerManaged","customerManaged":{"storageAccountURI":"https://clusterqa2ob.blob.core.windows.net.invalid"}}}}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachineFailed(oc, machinesetName)
g.By("Update machineset with invalid storage account")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-p", `{"spec":{"replicas":2,"template":{"spec":{"providerSpec":{"value":{"diagnostics":{"boot":{"storageAccountType":"CustomerManaged","customerManaged":{"storageAccountURI":"https://invalid.blob.core.windows.net"}}}}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachineFailed(oc, machinesetName)
})
// author: [email protected]
g.It("Author:miyadav-NonHyperShiftHOST-Low-36489-Machineset creation when publicIP:true in disconnected or normal (stratergy private or public) azure,aws,gcp enviroment [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure, clusterinfra.AWS, clusterinfra.GCP)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-36489"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
iaasPlatform := clusterinfra.CheckPlatform(oc)
publicZone, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("dns", "cluster", "-n", "openshift-dns", "-o=jsonpath={.spec.publicZone}").Output()
if err != nil {
g.Fail("Issue with dns setup")
}
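// an empty publicZone in the cluster DNS config indicates a private/disconnected installation (publish strategy Internal)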
g.By("Update machineset with publicIP: true")
switch iaasPlatform {
case clusterinfra.AWS:
msg, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-p", `{"spec":{"template":{"spec":{"providerSpec":{"value":{"publicIP": true}}}}}}`, "--type=merge").Output()
if publicZone == "" && iaasPlatform == clusterinfra.Azure {
o.Expect(msg).To(o.ContainSubstring("publicIP is not allowed in Azure disconnected installation with publish strategy as internal"))
} else {
o.Expect(err).NotTo(o.HaveOccurred())
//to scale up machineset with publicIP: true
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-p", `{"spec":{"replicas": 1}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
}
case clusterinfra.Azure:
msg, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-p", `{"spec":{"template":{"spec":{"providerSpec":{"value":{"publicIP": true}}}}}}`, "--type=merge").Output()
if publicZone == "" && iaasPlatform == clusterinfra.Azure {
o.Expect(msg).To(o.ContainSubstring("publicIP is not allowed in Azure disconnected installation with publish strategy as internal"))
} else {
o.Expect(err).NotTo(o.HaveOccurred())
//to scale up machineset with publicIP: true
//OutboundRules for VMs with public IpConfigurations with capi installation cannot provision publicIp(Limitation Azure)
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-p", `{"spec":{"replicas": 1}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
machineName := clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)[0]
g.By("Check machineset with publicIP: true is not allowed for Azure")
status, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args(mapiMachine, machineName, "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(status, "NicWithPublicIpCannotReferencePoolWithOutboundRule"))
}
case clusterinfra.GCP:
network, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.networkInterfaces[0].network}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
subnetwork, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.networkInterfaces[0].subnetwork}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
patchString := fmt.Sprintf(`{"spec":{"template":{"spec":{"providerSpec":{"value":{"networkInterfaces":[{"network":"%s","subnetwork":"%s","publicIP": true}]}}}},"replicas":1}}`, network, subnetwork)
_, err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-p", patchString, "--type=merge").Output()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
}
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-51013-machine api should issue client cert when AWS DNS suffix missing [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
g.By("Create a new dhcpOptions")
var newDhcpOptionsID, currentDhcpOptionsID string
clusterinfra.GetAwsCredentialFromCluster(oc)
awsClient := exutil.InitAwsSession()
newDhcpOptionsID, err := awsClient.CreateDhcpOptions()
if err != nil {
g.Skip("The credential is insufficient to perform create dhcpOptions operation, skip the cases!!")
}
defer awsClient.DeleteDhcpOptions(newDhcpOptionsID)
g.By("Associate the VPC with the new dhcpOptionsId")
machineName := clusterinfra.ListMasterMachineNames(oc)[0]
instanceID, err := awsClient.GetAwsInstanceID(machineName)
o.Expect(err).NotTo(o.HaveOccurred())
vpcID, err := awsClient.GetAwsInstanceVPCId(instanceID)
o.Expect(err).NotTo(o.HaveOccurred())
currentDhcpOptionsID, err = awsClient.GetDhcpOptionsIDOfVpc(vpcID)
o.Expect(err).NotTo(o.HaveOccurred())
defer awsClient.AssociateDhcpOptions(vpcID, currentDhcpOptionsID)
err = awsClient.AssociateDhcpOptions(vpcID, newDhcpOptionsID)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create a new machineset")
machinesetName := infrastructureName + "-51013"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 1}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
nodeName := clusterinfra.GetNodeNameFromMachine(oc, clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)[0])
e2e.Logf("nodeName:%s", nodeName)
o.Expect(strings.HasPrefix(nodeName, "ip-")).To(o.BeTrue())
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-59718-[Nutanix] Support bootType categories and project fields of NutanixMachineProviderConfig [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Nutanix)
// skip zones other than Development-LTS
zones, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-o=jsonpath={.items[*].metadata.labels.machine\\.openshift\\.io\\/zone}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if !strings.Contains(zones, "Development-LTS") {
g.Skip(fmt.Sprintf("this case can be only run in Development-LTS zone, but is's %s", zones))
}
g.By("Create a new machineset")
machinesetName := infrastructureName + "-59718"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Update machineset adding these new fields")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"bootType":"Legacy","categories":[{"key":"AppType","value":"Kubernetes"},{"key":"Environment","value":"Testing"}],"project":{"type":"name","name":"qe-project"}}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check machine with these new fields")
bootType, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.bootType}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
categories, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.categories}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
projectName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.project.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("bootType:%s, categories:%s, projectName:%s", bootType, categories, projectName)
o.Expect(strings.Contains(bootType, "Legacy") && strings.Contains(categories, "Kubernetes") && strings.Contains(categories, "Testing") && strings.Contains(projectName, "qe-project")).To(o.BeTrue())
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-59760-Create confidential compute VMs on GCP [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.GCP)
//We should enable this case when Google provides this support for their ARM machines
//https://issues.redhat.com/browse/OCPQE-22305
architecture.SkipArchitectures(oc, architecture.ARM64)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-59760"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
arch, err := clusterinfra.GetArchitectureFromMachineSet(oc, machinesetName)
o.Expect(err).NotTo(o.HaveOccurred())
if arch != architecture.AMD64 {
g.Skip("The selected machine set's arch is not amd64, skip this case!")
}
g.By("Update machineset with confidential compute options")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"machineType":"`+clusterinfra.GetInstanceTypeValuesByProviderAndArch(clusterinfra.GCP, arch)[1]+`","onHostMaintenance":"Terminate","confidentialCompute":"Enabled"}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check machine with confidentialCompute enabled")
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.confidentialCompute}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).Should(o.Equal("Enabled"))
g.By("Validate onHostMaintenance should be set to terminate in case confidential compute is enabled")
out, _ = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"machineType":"`+clusterinfra.GetInstanceTypeValuesByProviderAndArch(clusterinfra.GCP, arch)[1]+`","onHostMaintenance":"invalid","confidentialCompute":"Enabled"}}}}}}`, "--type=merge").Output()
o.Expect(strings.Contains(out, "Invalid value: \"invalid\": onHostMaintenance must be either Migrate or Terminate")).To(o.BeTrue())
out, _ = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"machineType":"`+clusterinfra.GetInstanceTypeValuesByProviderAndArch(clusterinfra.GCP, arch)[1]+`","onHostMaintenance":"Migrate","confidentialCompute":"Enabled"}}}}}}`, "--type=merge").Output()
o.Expect(strings.Contains(out, "Invalid value: \"Migrate\": ConfidentialCompute require OnHostMaintenance to be set to Terminate, the current value is: Migrate")).To(o.BeTrue())
g.By("Validate the instance type support confidential computing")
out, _ = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"machineType":"`+clusterinfra.GetInstanceTypeValuesByProviderAndArch(clusterinfra.GCP, arch)[0]+`","onHostMaintenance":"Terminate","confidentialCompute":"Enabled"}}}}}}`, "--type=merge").Output()
o.Expect(strings.Contains(out, "Invalid value: \""+clusterinfra.GetInstanceTypeValuesByProviderAndArch(clusterinfra.GCP, arch)[0]+"\": ConfidentialCompute require machine type in the following series: n2d,c2d")).To(o.BeTrue())
})
// author: [email protected]
g.It("Author:miyadav-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-57438-Add support to Shielded VMs on GCP [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.GCP)
//We should enable this case when the bug is fixed
//https://issues.redhat.com/browse/OCPBUGS-17904
architecture.SkipArchitectures(oc, architecture.ARM64)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-57438"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
arch, err := clusterinfra.GetArchitectureFromMachineSet(oc, machinesetName)
o.Expect(err).NotTo(o.HaveOccurred())
if arch != architecture.AMD64 {
g.Skip("The selected machine set's arch is not amd64, skip this case!")
}
g.By("Update machineset with shieldedInstanceConfig compute options Enabled")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"shieldedInstanceConfig": {"secureBoot": "Enabled","integrityMonitoring": "Enabled","virtualizedTrustedPlatformModule": "Enabled"}}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check machine with shieldedInstanceConfig options enabled")
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.shieldedInstanceConfig}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).Should(o.Equal("{\"integrityMonitoring\":\"Enabled\",\"secureBoot\":\"Enabled\",\"virtualizedTrustedPlatformModule\":\"Enabled\"}"))
g.By("Validate the webhooks warns with invalid values of shieldedVM config")
out, _ = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"shieldedInstanceConfig": {"secureBoot": "nabled","integrityMonitoring": "Enabled","virtualizedTrustedPlatformModule": "Enabled"}}}}}}}`, "--type=merge").Output()
o.Expect(strings.Contains(out, "secureBoot must be either Enabled or Disabled")).To(o.BeTrue())
})
// author: [email protected]
g.It("Author:miyadav-NonHyperShiftHOST-Longduration-NonPreRelease-High-48464-Dedicated tenancy should be exposed on aws providerspec [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
clusterinfra.SkipForAwsOutpostCluster(oc)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-48464"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Update machineset to have dedicated tenancy ")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"placement": {"tenancy": "dedicated"}}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check machine available with dedicated tenancy ")
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.placement.tenancy}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).Should(o.Equal("dedicated"))
g.By("Validate the webhooks warns with invalid values of tenancy config")
out, _ = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"placement": {"tenancy": "invalid"}}}}}}}`, "--type=merge").Output()
o.Expect(strings.Contains(out, "Invalid providerSpec.tenancy, the only allowed options are: default, dedicated, host")).To(o.BeTrue())
})
// author: [email protected]
g.It("Author:miyadav-NonHyperShiftHOST-Longduration-NonPreRelease-High-39639-host-based disk encryption at VM on azure platform [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-39639"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Update machineset to have encryption at host enabled ")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"securityProfile": {"encryptionAtHost": true}}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check machine available with encrytption enabled ")
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.securityProfile}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).Should(o.ContainSubstring("{\"encryptionAtHost\":true"))
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-32269-Implement validation/defaulting for AWS [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
mapiBaseDir := exutil.FixturePath("testdata", "clusterinfrastructure", "mapi")
defaultMachinesetAwsTemplate := filepath.Join(mapiBaseDir, "default-machineset-aws.yaml")
clusterID := clusterinfra.GetInfrastructureName(oc)
masterArchtype := architecture.GetControlPlaneArch(oc)
randomMachinesetName := clusterinfra.GetRandomMachineSetName(oc)
msArchtype, err := clusterinfra.GetArchitectureFromMachineSet(oc, randomMachinesetName)
o.Expect(err).NotTo(o.HaveOccurred())
if masterArchtype != msArchtype {
g.Skip("The selected machine set's arch is not the same with the master machine's arch, skip this case!")
}
amiID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.ami.id}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
availabilityZone, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.placement.availabilityZone}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
sgName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.securityGroups[0].filters[0].values[0]}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
subnet, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.subnet.filters[0].values[0]}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if subnet == "" {
subnet, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.subnet.id}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
defaultMachinesetAwsTemplate = filepath.Join(mapiBaseDir, "default-machineset-aws-id.yaml")
}
iamInstanceProfileID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.iamInstanceProfile.id}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
defaultMachinesetAws := defaultMachinesetAwsDescription{
name: infrastructureName + "-32269-default",
clustername: clusterID,
template: defaultMachinesetAwsTemplate,
amiID: amiID,
availabilityZone: availabilityZone,
sgName: sgName,
subnet: subnet,
namespace: machineAPINamespace,
iamInstanceProfileID: iamInstanceProfileID,
}
defer clusterinfra.WaitForMachinesDisapper(oc, defaultMachinesetAws.name)
defer defaultMachinesetAws.deleteDefaultMachineSetOnAws(oc)
defaultMachinesetAws.createDefaultMachineSetOnAws(oc)
instanceTypeMachine, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+defaultMachinesetAws.name, "-o=jsonpath={.items[0].spec.providerSpec.value.instanceType}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
switch arch := architecture.ClusterArchitecture(oc); arch {
case architecture.AMD64:
o.Expect(instanceTypeMachine).Should(o.Equal("m5.large"))
case architecture.ARM64:
o.Expect(instanceTypeMachine).Should(o.Equal("m6g.large"))
default:
e2e.Logf("ignoring the validation of the instanceType for cluster architecture %s", arch.String())
}
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-37497-ClusterInfrastructure Dedicated Spot Instances could be created [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
architecture.SkipNonAmd64SingleArch(oc)
clusterinfra.SkipForAwsOutpostCluster(oc)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-37497"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Update machineset to Dedicated Spot Instances")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"spotMarketOptions":{},"instanceType":"c4.8xlarge","placement": {"tenancy": "dedicated"}}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-64909-AWS Placement group support for MAPI [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
region, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.aws.region}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if region != "us-east-2" && region != "us-east-1" {
g.Skip("Not support region " + region + " for the case for now.")
}
clusterinfra.GetAwsCredentialFromCluster(oc)
awsClient := exutil.InitAwsSession()
_, err = awsClient.GetPlacementGroupByName("pgcluster")
if err != nil {
g.Skip("There is no this placement group for testing, skip the cases!!")
}
g.By("Create a new machineset")
machinesetName := infrastructureName + "-64909"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
availabilityZone, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-o=jsonpath={.spec.template.spec.providerSpec.value.placement.availabilityZone}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if availabilityZone != "us-east-2b" && availabilityZone != "us-east-1b" {
g.Skip("Restricted to b availabilityZone testing because cluster placement group cannot span zones. But it's " + availabilityZone)
}
g.By("Update machineset with Placement group")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"placementGroupName":"pgcluster"}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check machine with Placement group")
placementGroupName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.placementGroupName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("placementGroupName:%s", placementGroupName)
o.Expect(placementGroupName).Should(o.Equal("pgcluster"))
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-LEVEL0-Critical-25436-Scale up/scale down the cluster by changing the replicas of the machineSet [Disruptive][Slow]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP, clusterinfra.VSphere, clusterinfra.IBMCloud, clusterinfra.AlibabaCloud, clusterinfra.Nutanix, clusterinfra.OpenStack, clusterinfra.Ovirt)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-25436g"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Scale up machineset")
clusterinfra.ScaleMachineSet(oc, machinesetName, 1)
g.By("Scale down machineset")
clusterinfra.ScaleMachineSet(oc, machinesetName, 0)
})
// author: [email protected]
g.It("Author:dtobolik-NonHyperShiftHOST-NonPreRelease-Medium-66866-AWS machineset support for multiple AWS security groups [Disruptive][Slow]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
g.By("Create aws security group")
clusterinfra.GetAwsCredentialFromCluster(oc)
awsClient := exutil.InitAwsSession()
randomMachineName := clusterinfra.ListWorkerMachineNames(oc)[0]
randomInstanceID, err := awsClient.GetAwsInstanceID(randomMachineName)
o.Expect(err).NotTo(o.HaveOccurred())
vpcID, err := awsClient.GetAwsInstanceVPCId(randomInstanceID)
o.Expect(err).NotTo(o.HaveOccurred())
sgName := "ocp-66866-sg"
sgID, err := awsClient.CreateSecurityGroup(sgName, vpcID, "ocp-66866 testing security group")
o.Expect(err).NotTo(o.HaveOccurred())
defer awsClient.DeleteSecurityGroup(sgID)
err = awsClient.CreateTag(sgID, "Name", sgName)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create a new machineset")
machineSetName := infrastructureName + "-66866"
machineSet := clusterinfra.MachineSetDescription{Name: machineSetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machineSetName)
defer machineSet.DeleteMachineSet(oc)
machineSet.CreateMachineSet(oc)
g.By("Add security group to machineset")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machineSetName, "-n", "openshift-machine-api", "-p", `[{"op":"replace","path":"/spec/replicas","value":1},{"op":"add","path":"/spec/template/spec/providerSpec/value/securityGroups/-","value":{"filters":[{"name":"tag:Name","values":["`+sgName+`"]}]}}]`, "--type=json").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machineSetName)
g.By("Check security group is attached")
machineName := clusterinfra.GetMachineNamesFromMachineSet(oc, machineSetName)[0]
instanceID, err := awsClient.GetAwsInstanceID(machineName)
o.Expect(err).NotTo(o.HaveOccurred())
securityGroups, err := awsClient.GetInstanceSecurityGroupIDs(instanceID)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(securityGroups).Should(o.ContainElement(sgID))
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-33058-Implement defaulting machineset values for azure [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure)
credType, err := oc.AsAdmin().Run("get").Args("cloudcredentials.operator.openshift.io/cluster", "-o=jsonpath={.spec.credentialsMode}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(credType, "Manual") {
g.Skip("Skip test on azure sts cluster")
}
mapiBaseDir := exutil.FixturePath("testdata", "clusterinfrastructure", "mapi")
defaultMachinesetAzureTemplate := filepath.Join(mapiBaseDir, "default-machineset-azure.yaml")
clusterID := clusterinfra.GetInfrastructureName(oc)
randomMachinesetName := clusterinfra.GetRandomMachineSetName(oc)
location, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.location}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
vnet, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.vnet}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
subnet, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.subnet}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
networkResourceGroup, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.networkResourceGroup}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
defaultMachinesetAzure := defaultMachinesetAzureDescription{
name: infrastructureName + "-33058-default",
clustername: clusterID,
template: defaultMachinesetAzureTemplate,
location: location,
vnet: vnet,
subnet: subnet,
namespace: machineAPINamespace,
networkResourceGroup: networkResourceGroup,
}
defer clusterinfra.WaitForMachinesDisapper(oc, defaultMachinesetAzure.name)
defer defaultMachinesetAzure.deleteDefaultMachineSetOnAzure(oc)
defaultMachinesetAzure.createDefaultMachineSetOnAzure(oc)
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-46966-Validation webhook check for gpus on GCP [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.GCP)
skipTestIfSpotWorkers(oc)
architecture.SkipArchitectures(oc, architecture.ARM64)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-46966"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
zone, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-o=jsonpath={.spec.template.spec.providerSpec.value.zone}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if !strings.Contains(zone, "us-central1-") {
g.Skip("not valid zone for GPU machines")
}
g.By("1.Update machineset with A100 GPUs (A family) and set gpus")
out, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"template":{"spec":{"providerSpec":{"value":{"machineType":"a2-highgpu-1g","gpus": [ { "count": 1,"type": "nvidia-tesla-p100" }]}}}}}}`, "--type=merge").Output()
o.Expect(strings.Contains(out, "A2 machine types have already attached gpus, additional gpus cannot be specified")).To(o.BeTrue())
g.By("2.Update machineset with nvidia-tesla-A100 Type")
out, _ = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"template":{"spec":{"providerSpec":{"value":{"machineType":"n1-standard-1","gpus": [ { "count": 1,"type": "nvidia-tesla-a100" }]}}}}}}`, "--type=merge").Output()
o.Expect(strings.Contains(out, "nvidia-tesla-a100 gpus, are only attached to the A2 machine types")).To(o.BeTrue())
g.By("3.Update machineset with other machine type families and set gpus")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"machineType":"e2-medium","gpus": [ { "count": 1,"type": "nvidia-tesla-p100" }]}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachineFailed(oc, machinesetName)
machineName := clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)[0]
out, _ = oc.AsAdmin().WithoutNamespace().Run("describe").Args(mapiMachine, machineName, "-n", machineAPINamespace).Output()
o.Expect(out).Should(o.ContainSubstring("e2-medium does not support accelerators. Only A2 and N1 machine type families support guest acceleartors"))
clusterinfra.ScaleMachineSet(oc, machinesetName, 0)
g.By("4.Update machineset with A100 GPUs (A2 family) nvidia-tesla-a100, onHostMaintenance is set to Migrate")
out, _ = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"template":{"spec":{"providerSpec":{"value":{"machineType":"a2-highgpu-1g","onHostMaintenance":"Migrate"}}}}}}`, "--type=merge").Output()
o.Expect(strings.Contains(out, "Forbidden: When GPUs are specified or using machineType with pre-attached GPUs(A2 machine family), onHostMaintenance must be set to Terminate")).To(o.BeTrue())
g.By("5.Update machineset with A100 GPUs (A2 family) nvidia-tesla-a100, restartPolicy with an invalid value")
out, _ = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"template":{"spec":{"providerSpec":{"value":{"machineType":"a2-highgpu-1g","restartPolicy": "Invalid","onHostMaintenance": "Terminate"}}}}}}`, "--type=merge").Output()
o.Expect(strings.Contains(out, "Invalid value: \"Invalid\": restartPolicy must be either Never or Always")).To(o.BeTrue())
g.By("6.Update machineset with A100 GPUs (A2 family) nvidia-tesla-a100, onHostMaintenance with an invalid value")
out, _ = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"template":{"spec":{"providerSpec":{"value":{"machineType":"a2-highgpu-1g","restartPolicy": "Always","onHostMaintenance": "Invalid"}}}}}}`, "--type=merge").Output()
o.Expect(strings.Contains(out, "Invalid value: \"Invalid\": onHostMaintenance must be either Migrate or Terminate")).To(o.BeTrue())
g.By("7.Update machineset with other GPU types, count with an invalid value")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"machineType":"n1-standard-1","restartPolicy": "Always","onHostMaintenance": "Terminate","gpus": [ { "count": -1,"type": "nvidia-tesla-p100" }]}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachineFailed(oc, machinesetName)
machineName = clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)[0]
out, _ = oc.AsAdmin().WithoutNamespace().Run("describe").Args(mapiMachine, machineName, "-n", machineAPINamespace).Output()
o.Expect(strings.Contains(out, "Number of accelerator cards attached to an instance must be one of [1, 2, 4]") || strings.Contains(out, "AcceleratorType nvidia-tesla-p100 not available in the zone")).To(o.BeTrue())
clusterinfra.ScaleMachineSet(oc, machinesetName, 0)
g.By("8.Update machineset with other GPU types, type with an empty value")
out, _ = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"template":{"spec":{"providerSpec":{"value":{"machineType":"n1-standard-1","restartPolicy": "Always","onHostMaintenance": "Terminate","gpus": [ { "count": 1,"type": "" }]}}}}}}`, "--type=merge").Output()
o.Expect(strings.Contains(out, "Required value: Type is required")).To(o.BeTrue())
g.By("9.Update machineset with other GPU types, type with an invalid value")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"machineType":"n1-standard-1","restartPolicy": "Always","onHostMaintenance": "Terminate","gpus": [ { "count": 1,"type": "invalid" }]}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachineFailed(oc, machinesetName)
machineName = clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)[0]
out, _ = oc.AsAdmin().WithoutNamespace().Run("describe").Args(mapiMachine, machineName, "-n", machineAPINamespace).Output()
o.Expect(out).Should(o.ContainSubstring("AcceleratorType invalid not available"))
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-High-30379-New machine can join cluster when VPC has custom DHCP option set [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
g.By("Create a new dhcpOptions")
var newDhcpOptionsID, currentDhcpOptionsID string
clusterinfra.GetAwsCredentialFromCluster(oc)
awsClient := exutil.InitAwsSession()
newDhcpOptionsID, err := awsClient.CreateDhcpOptionsWithDomainName("example30379.com")
if err != nil {
g.Skip("The credential is insufficient to perform create dhcpOptions operation, skip the cases!!")
}
defer func() {
err := awsClient.DeleteDhcpOptions(newDhcpOptionsID)
o.Expect(err).NotTo(o.HaveOccurred())
}()
g.By("Associate the VPC with the new dhcpOptionsId")
machineName := clusterinfra.ListMasterMachineNames(oc)[0]
instanceID, err := awsClient.GetAwsInstanceID(machineName)
o.Expect(err).NotTo(o.HaveOccurred())
vpcID, err := awsClient.GetAwsInstanceVPCId(instanceID)
o.Expect(err).NotTo(o.HaveOccurred())
currentDhcpOptionsID, err = awsClient.GetDhcpOptionsIDOfVpc(vpcID)
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
err := awsClient.AssociateDhcpOptions(vpcID, currentDhcpOptionsID)
o.Expect(err).NotTo(o.HaveOccurred())
}()
err = awsClient.AssociateDhcpOptions(vpcID, newDhcpOptionsID)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create a new machineset")
machinesetName := infrastructureName + "-30379"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 1}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
machineNameOfMachineSet := clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)[0]
nodeName := clusterinfra.GetNodeNameFromMachine(oc, machineNameOfMachineSet)
readyStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeName, "-o=jsonpath={.status.conditions[?(@.type==\"Ready\")].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(readyStatus).Should(o.Equal("True"))
internalDNS, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, machineNameOfMachineSet, "-o=jsonpath={.status.addresses[?(@.type==\"InternalDNS\")].address}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(internalDNS, "example30379.com")).To(o.BeTrue())
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-73762-New machine can join cluster when VPC has custom DHCP option set containing multiple domain names [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
g.By("Create a new dhcpOptions")
var newDhcpOptionsID, currentDhcpOptionsID string
clusterinfra.GetAwsCredentialFromCluster(oc)
awsClient := exutil.InitAwsSession()
newDhcpOptionsID, err := awsClient.CreateDhcpOptionsWithDomainName("EXAMple73762A.com. example73762b.com. eXaMpLe73762C.COM")
if err != nil {
g.Skip("The credential is insufficient to perform create dhcpOptions operation, skip the cases!!")
}
defer func() {
err := awsClient.DeleteDhcpOptions(newDhcpOptionsID)
o.Expect(err).NotTo(o.HaveOccurred())
}()
g.By("Associate the VPC with the new dhcpOptionsId")
machineName := clusterinfra.ListMasterMachineNames(oc)[0]
instanceID, err := awsClient.GetAwsInstanceID(machineName)
o.Expect(err).NotTo(o.HaveOccurred())
vpcID, err := awsClient.GetAwsInstanceVPCId(instanceID)
o.Expect(err).NotTo(o.HaveOccurred())
currentDhcpOptionsID, err = awsClient.GetDhcpOptionsIDOfVpc(vpcID)
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
err := awsClient.AssociateDhcpOptions(vpcID, currentDhcpOptionsID)
o.Expect(err).NotTo(o.HaveOccurred())
}()
err = awsClient.AssociateDhcpOptions(vpcID, newDhcpOptionsID)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create a new machineset")
machinesetName := infrastructureName + "-73762"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 1}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
machineNameOfMachineSet := clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)[0]
nodeName := clusterinfra.GetNodeNameFromMachine(oc, machineNameOfMachineSet)
readyStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeName, "-o=jsonpath={.status.conditions[?(@.type==\"Ready\")].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(readyStatus).Should(o.Equal("True"))
internalDNS, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, machineNameOfMachineSet, "-o=jsonpath={.status.addresses[?(@.type==\"InternalDNS\")].address}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(internalDNS, "EXAMple73762A.com") && strings.Contains(internalDNS, "example73762b.com") && strings.Contains(internalDNS, "eXaMpLe73762C.COM")).To(o.BeTrue())
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-High-73851-Node shouldn't have uninitialized taint [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-73851"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Update machineset taint")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"taints":[{"key":"node.kubernetes.io/unreachable","effect":"NoExecute"},{"key":"anything","effect":"NoSchedule"},{"key":"node-role.kubernetes.io/infra","effect":"NoExecute"},{"key":"node.kubernetes.io/not-ready","effect":"NoExecute"}]}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check no uninitialized taint in node")
machineName := clusterinfra.GetLatestMachineFromMachineSet(oc, machinesetName)
o.Expect(machineName).NotTo(o.BeEmpty())
nodeName := clusterinfra.GetNodeNameFromMachine(oc, machineName)
o.Eventually(func() bool {
readyStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeName, "-o=jsonpath={.status.conditions[?(@.type==\"Ready\")].status}").Output()
return err == nil && readyStatus == "True"
}).WithTimeout(5 * time.Minute).WithPolling(30 * time.Second).Should(o.BeTrue())
taints, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeName, "-o=jsonpath={.spec.taints}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(taints).ShouldNot(o.ContainSubstring("uninitialized"))
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-73668-Create machineset with Reserved Capacity [Disruptive]", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure)
skipTestIfSpotWorkers(oc)
azureCloudName, azureErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.azure.cloudName}").Output()
o.Expect(azureErr).NotTo(o.HaveOccurred())
if azureCloudName == "AzureStackCloud" || azureCloudName == "AzureUSGovernmentCloud" {
g.Skip("Skip for ASH and azure Gov due to no zone for ash, and for USGov it's hard to getclient with baseURI!")
}
exutil.By("Create a machineset")
machinesetName := infrastructureName + "-73668"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
ms.CreateMachineSet(oc)
zone, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-o=jsonpath={.spec.template.spec.providerSpec.value.zone}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if zone == "" {
g.Skip("Zone doesn't exist, capacity reservation group cannot be set on a virtual machine which is part of an availability set!")
}
region, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-o=jsonpath={.spec.template.spec.providerSpec.value.location}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
machineType, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-o=jsonpath={.spec.template.spec.providerSpec.value.vmSize}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create capacityReservationGroup and capacityReservation")
resourceGroup, err := exutil.GetAzureCredentialFromCluster(oc)
o.Expect(err).NotTo(o.HaveOccurred())
capacityReservationGroupName := "capacityReservationGroup73668"
capacityReservationName := "capacityReservation73668"
azClientSet := exutil.NewAzureClientSetWithRootCreds(oc)
capacityReservationGroup, err := azClientSet.CreateCapacityReservationGroup(context.Background(), capacityReservationGroupName, resourceGroup, region, zone)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(capacityReservationGroup).NotTo(o.BeEmpty())
err = azClientSet.CreateCapacityReservation(context.Background(), capacityReservationGroupName, capacityReservationName, region, resourceGroup, machineType, zone)
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
ms.DeleteMachineSet(oc)
clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
azClientSet.DeleteCapacityReservation(context.Background(), capacityReservationGroupName, capacityReservationName, resourceGroup)
azClientSet.DeleteCapacityReservationGroup(context.Background(), capacityReservationGroupName, resourceGroup)
}()
exutil.By("Patch machineset with valid capacityReservationGroupID")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"capacityReservationGroupID": "`+capacityReservationGroup+`"}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
exutil.By("Check machine with capacityReservationGroupID")
capacityReservationGroupID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.capacityReservationGroupID}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(capacityReservationGroupID).Should(o.ContainSubstring("capacityReservationGroups"))
exutil.By("Patch machineset with empty capacityReservationGroupID and set replicas to 2")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":2,"template":{"spec":{"providerSpec":{"value":{ "capacityReservationGroupID": ""}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 2, machinesetName)
exutil.By("Check machine without capacityReservationGroupID")
machine := clusterinfra.GetLatestMachineFromMachineSet(oc, machinesetName)
capacityReservationGroupID, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, machine, "-n", "openshift-machine-api", "-o=jsonpath={.spec.providerSpec.value.capacityReservationGroupID}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(capacityReservationGroupID).To(o.BeEmpty())
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Medium-73669-Webhook validation for Reserved Capacity [Disruptive]", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure)
skipTestIfSpotWorkers(oc)
azureCloudName, azureErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.azure.cloudName}").Output()
o.Expect(azureErr).NotTo(o.HaveOccurred())
if azureCloudName == "AzureStackCloud" || azureCloudName == "AzureUSGovernmentCloud" {
g.Skip("Skip for ASH and azure Gov due to no zone for ash, and for USGov it's hard to getclient with baseURI!")
}
exutil.By("Create a machineset ")
machinesetName := infrastructureName + "-73669"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
zone, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-o=jsonpath={.spec.template.spec.providerSpec.value.zone}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if zone == "" {
g.Skip("Zone doesn't exist, capacity reservation group cannot be set on a virtual machine which is part of an availability set!")
}
exutil.By("Patch machineset that the value of capacityReservationGroupID does not start with /")
out, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{ "capacityReservationGroupID": "subscriptions/53b8f551-f0fc-4bea-8cba-6d1fefd54c8a/resourceGroups/ZHSUN-AZ9-DVD88-RG/providers/Microsoft.Compute/capacityReservationGroups/zhsun-capacity"}}}}}}`, "--type=merge").Output()
o.Expect(out).To(o.ContainSubstring("must start with '/'"))
exutil.By("Patch machineset with invalid subscriptions")
out, _ = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{ "capacityReservationGroupID": "/subscrip/53b8f551-f0fc-4bea-8cba-6d1fefd54c8a/resourceGroups/ZHSUN-AZ9-DVD88-RG/providers/Microsoft.Compute/capacityReservationGroups/zhsun-capacity"}}}}}}`, "--type=merge").Output()
o.Expect(out).To(o.ContainSubstring("capacityReservationGroupID: Invalid value"))
exutil.By("Patch machineset with invalid resourceGroups")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{ "capacityReservationGroupID": "/subscriptions/53b8f551-f0fc-4bea-8cba-6d1fefd54c8a/resource/ZHSUN-AZ9-DVD88-RG/providers/Microsoft.Compute/capacityReservationGroups/zhsun-capacity"}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachineFailed(oc, machinesetName)
exutil.By("Patch machineset with invalid capacityReservationGroups")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":2,"template":{"spec":{"providerSpec":{"value":{ "capacityReservationGroupID": "/subscriptions/53b8f551-f0fc-4bea-8cba-6d1fefd54c8a/resourceGroups/ZHSUN-AZ9-DVD88-RG/providers/Microsoft.Compute/capacityReservation/zhsun-capacity"}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachineFailed(oc, machinesetName)
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-74603-[MAPI] Support AWS Placement Group Partition Number [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
region, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.aws.region}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if region != "us-east-2" && region != "us-east-1" {
g.Skip("Not support region " + region + " for the case for now.")
}
clusterinfra.GetAwsCredentialFromCluster(oc)
awsClient := exutil.InitAwsSession()
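		// "pgpartition3" is expected to be a pre-created partition placement group in the test account
		// (presumably with 3 partitions, as the name suggests); skip the case if it does not exist.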
_, err = awsClient.GetPlacementGroupByName("pgpartition3")
if err != nil {
g.Skip("There is no this placement group for testing, skip the cases!!")
}
g.By("Create a new machineset")
machinesetName := infrastructureName + "-74603"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
exutil.By("Patch machineset only with valid partition placementGroupName")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"placementGroupName":"pgpartition3"}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
exutil.By("Check machine with placementGroupName and without placementGroupPartition ")
placementGroupName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.placementGroupName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(placementGroupName).Should(o.Equal("pgpartition3"))
placementGroupPartition, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.placementGroupPartition}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(placementGroupPartition).To(o.BeEmpty())
exutil.By("Patch machineset with valid partition placementGroupName and placementGroupPartition")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":2,"template":{"spec":{"providerSpec":{"value":{"placementGroupName":"pgpartition3", "placementGroupPartition":2}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 2, machinesetName)
exutil.By("Check machine with placementGroupName and placementGroupPartition")
machine := clusterinfra.GetLatestMachineFromMachineSet(oc, machinesetName)
placementGroupName, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, machine, "-n", "openshift-machine-api", "-o=jsonpath={.spec.providerSpec.value.placementGroupName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(placementGroupName).Should(o.Equal("pgpartition3"))
placementGroupPartition, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, machine, "-n", "openshift-machine-api", "-o=jsonpath={.spec.providerSpec.value.placementGroupPartition}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(placementGroupPartition).Should(o.Equal("2"))
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-75037-[MAPI] Webhook validation for AWS Placement Group Partition Number [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
region, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.aws.region}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if region != "us-east-2" && region != "us-east-1" {
g.Skip("Not support region " + region + " for the case for now.")
}
clusterinfra.GetAwsCredentialFromCluster(oc)
awsClient := exutil.InitAwsSession()
_, err = awsClient.GetPlacementGroupByName("pgpartition3")
if err != nil {
g.Skip("There is no this placement group for testing, skip the cases!!")
}
g.By("Create a new machineset")
machinesetName := infrastructureName + "-75037"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
exutil.By("Update machineset with invalid Placement group partition nubmer")
out, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"placementGroupName":"pgpartition3", "placementGroupPartition":0}}}}}}`, "--type=merge").Output()
o.Expect(out).To(o.ContainSubstring("placementGroupPartition: Invalid value: 0: providerSpec.placementGroupPartition must be between 1 and 7"))
exutil.By("Update machineset with placementGroupPartition but without placementGroupName")
out, _ = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"placementGroupPartition":2}}}}}}`, "--type=merge").Output()
o.Expect(out).To(o.ContainSubstring("placementGroupPartition: Invalid value: 2: providerSpec.placementGroupPartition is set but providerSpec.placementGroupName is empty"))
exutil.By("Patch machineset with valid placementGroupPartition but cluster placementGroupName")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"placementGroupName":"pgcluster", "placementGroupPartition":2}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachineFailed(oc, machinesetName)
exutil.By("Patch machineset with invalid placementGroupPartition of the partition placementGroupName")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"placementGroupName":"pgpartition3", "placementGroupPartition":4}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachineFailed(oc, machinesetName)
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-24721-Add support for machine tags [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure)
exutil.By("Create a machineset")
machinesetName := infrastructureName + "-24721"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
exutil.By("Update machineset with tags")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"tags":{"key24721a":"value24721a","key24721b":"value24721b"}}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
exutil.By("Check machine with tags")
tags, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.tags}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("tags:%s", tags)
o.Expect(tags).Should(o.ContainSubstring("key24721b"))
})
//author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-52602-Drain operation should be asynchronous from the other machine operations [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.GCP, clusterinfra.Azure, clusterinfra.IBMCloud, clusterinfra.Nutanix, clusterinfra.VSphere, clusterinfra.OpenStack)
exutil.By("Create a new machineset")
machinesetName := infrastructureName + "-52602"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
exutil.By("Scale machineset to 5")
clusterinfra.ScaleMachineSet(oc, machinesetName, 5)
exutil.By("Create PDB")
miscDir := exutil.FixturePath("testdata", "clusterinfrastructure", "misc")
pdbTemplate := filepath.Join(miscDir, "pdb.yaml")
workloadTemplate := filepath.Join(miscDir, "workload-with-label.yaml")
pdb := PodDisruptionBudget{name: "pdb-52602", namespace: machineAPINamespace, template: pdbTemplate, label: "label-52602"}
workLoad := workLoadDescription{name: "workload-52602", namespace: machineAPINamespace, template: workloadTemplate, label: "label-52602"}
defer pdb.deletePDB(oc)
pdb.createPDB(oc)
defer workLoad.deleteWorkLoad(oc)
workLoad.createWorkLoad(oc)
exutil.By("Delete machines")
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args(mapiMachine, "-n", machineAPINamespace, "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "--wait=false").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check machines can quickly be created without waiting for the other Nodes to drain.")
o.Eventually(func() bool {
machineNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[*].metadata.name}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
machines := strings.Fields(machineNames)
if len(machines) == 10 {
return true
}
return false
}).WithTimeout(30 * time.Second).WithPolling(2 * time.Second).Should(o.BeTrue())
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-76367-[MAPI] Allow creating Nutanix worker VMs with GPUs [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Nutanix)
// skip zones other than Development-GPU
zones, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-o=jsonpath={.items[*].metadata.labels.machine\\.openshift\\.io\\/zone}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if !strings.Contains(zones, "Development-GPU") {
g.Skip(fmt.Sprintf("this case can be only run in Development-GPU zone, but is's %s", zones))
}
g.By("Create a new machineset")
machinesetName := infrastructureName + "-76367"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Update machineset with gpus")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"gpus":[{"type":"Name","name":"Tesla T4 compute"}]}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check machine with gpus")
gpus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.gpus}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("gpus:%s", gpus)
o.Expect(strings.Contains(gpus, "Tesla T4 compute")).To(o.BeTrue())
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-76366-[MAPI] Allow creating Nutanix VMs with multiple disks [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Nutanix)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-76366"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Update machineset with data disks")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"dataDisks":[{"deviceProperties":{"deviceType":"Disk","adapterType":"SCSI","deviceIndex":1},"diskSize":"1Gi","storageConfig":{"diskMode":"Standard"}}]}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check machine with data disks")
dataDisks, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.dataDisks}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("dataDisks:%s", dataDisks)
o.Expect(strings.Contains(dataDisks, "SCSI")).To(o.BeTrue())
})
})
| package clusterinfrastructure | ||||
test case | openshift/openshift-tests-private | 1bf5b50f-d886-4617-b5d9-729aef1e9e2b | Author:zhsun-NonHyperShiftHOST-Medium-45772-MachineSet selector is immutable | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:zhsun-NonHyperShiftHOST-Medium-45772-MachineSet selector is immutable", func() {
g.By("Create a new machineset")
clusterinfra.SkipConditionally(oc)
machinesetName := infrastructureName + "-45772"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Update machineset with empty clusterID")
out, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"selector":{"matchLabels":{"machine.openshift.io/cluster-api-cluster": null}}}}`, "--type=merge").Output()
o.Expect(out).To(o.ContainSubstring("selector is immutable"))
}) | |||||
test case | openshift/openshift-tests-private | 4755f860-8fe2-451f-abda-210d79a58d91 | Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-45377-Enable accelerated network via MachineSets on azure [Disruptive] | ['"fmt"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-45377-Enable accelerated network via MachineSets on azure [Disruptive]", func() {
g.By("Create a new machineset with acceleratedNetworking: true")
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure)
machinesetName := infrastructureName + "-45377"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
arch, err := clusterinfra.GetArchitectureFromMachineSet(oc, machinesetName)
o.Expect(err).NotTo(o.HaveOccurred())
var vmSize string
switch arch {
case architecture.AMD64:
vmSize = "Standard_D2s_v3"
case architecture.ARM64:
vmSize = "Standard_D8ps_v5"
default:
g.Skip("This case doesn't support other architectures than arm64, amd64")
}
g.By("Update machineset with acceleratedNetworking: true")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n",
machineAPINamespace, "-p",
fmt.Sprintf(`{"spec":{"replicas":1,"template":{"spec":{"providerSpec":`+
`{"value":{"acceleratedNetworking":true,"vmSize":"%s"}}}}}}`, vmSize),
"--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
		// When acceleratedNetworking is true, the machine needs nearly 9 minutes to reach Running, so the method timeout is set to 10 minutes.
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check machine with acceleratedNetworking: true")
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.acceleratedNetworking}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("out:%s", out)
o.Expect(out).To(o.ContainSubstring("true"))
}) | |||||
test case | openshift/openshift-tests-private | a6236bce-6a8b-46ac-8dfe-7cd046c23d33 | Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-46967-Implement Ephemeral OS Disks - OS cache placement on azure [Disruptive] | ['"fmt"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-46967-Implement Ephemeral OS Disks - OS cache placement on azure [Disruptive]", func() {
g.By("Create a new machineset with Ephemeral OS Disks - OS cache placement")
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure)
skipTestIfSpotWorkers(oc)
machinesetName := infrastructureName + "-46967"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
arch, err := clusterinfra.GetArchitectureFromMachineSet(oc, machinesetName)
o.Expect(err).NotTo(o.HaveOccurred())
var vmSize string
switch arch {
case architecture.AMD64:
vmSize = "Standard_D2s_v3"
case architecture.ARM64:
vmSize = "Standard_D2plds_v5"
default:
g.Skip("This case doesn't support other architectures than arm64, amd64")
}
g.By("Update machineset with Ephemeral OS Disks - OS cache placement")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n",
machineAPINamespace, "-p",
fmt.Sprintf(`{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"vmSize":"%s",`+
`"osDisk":{"diskSizeGB":30,"cachingType":"ReadOnly","diskSettings":{"ephemeralStorageLocation":"Local"},`+
`"managedDisk":{"storageAccountType":""}}}}}}}}`, vmSize), "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check machine with Ephemeral OS Disks - OS cache placement")
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.osDisk.diskSettings.ephemeralStorageLocation}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("out:%s", out)
o.Expect(out).To(o.ContainSubstring("Local"))
}) | |||||
test case | openshift/openshift-tests-private | 515d9554-0c69-4c75-a1d0-0066f3187027 | Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-46303-Availability sets could be created when needed for azure [Disruptive] | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-46303-Availability sets could be created when needed for azure [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure)
defaultWorkerMachinesetName := clusterinfra.GetRandomMachineSetName(oc)
region, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, defaultWorkerMachinesetName, "-n", "openshift-machine-api", "-o=jsonpath={.spec.template.spec.providerSpec.value.location}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if region != "northcentralus" && region != "westus" {
/*
				This case is only supported in a region which doesn't have zones.
				These two regions cover most of the templates in flexy-templates and they don't have zones,
				so the test is restricted to these two regions.
			*/
			g.Skip("Skip this test scenario because the test is only applicable in the \"northcentralus\" or \"westus\" regions")
}
g.By("Create a new machineset")
machinesetName := infrastructureName + "-46303"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Update machineset with availabilitySet already created for the default worker machineset")
/*
If the availability set is not created for the default worker machineset,
			the machine status will be Failed and the error message shows "Availability Set cannot be found".
			Therefore, if the machine is created successfully with the availability set,
			it proves that the availability set was created when the default worker machineset was created.
*/
availabilitySetName := defaultWorkerMachinesetName + "-as"
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"availabilitySet":"`+availabilitySetName+`"}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check machine with availabilitySet")
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.availabilitySet}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("availability set name is: %s", out)
o.Expect(out == availabilitySetName).To(o.BeTrue())
}) | |||||
test case | openshift/openshift-tests-private | a8e56125-281c-4524-8cbf-8b4a98cb89f3 | Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-47177-Medium-47201-[MDH] Machine Deletion Hooks appropriately block lifecycle phases [Disruptive] | ['"strings"', '"time"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-47177-Medium-47201-[MDH] Machine Deletion Hooks appropriately block lifecycle phases [Disruptive]", func() {
g.By("Create a new machineset with lifecycle hook")
clusterinfra.SkipConditionally(oc)
machinesetName := infrastructureName + "-47177-47201"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Update machineset with lifecycle hook")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"lifecycleHooks":{"preDrain":[{"name":"drain1","owner":"drain-controller1"}],"preTerminate":[{"name":"terminate2","owner":"terminate-controller2"}]}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Delete newly created machine by scaling " + machinesetName + " to 0")
err = oc.AsAdmin().WithoutNamespace().Run("scale").Args("--replicas=0", "-n", "openshift-machine-api", mapiMachineset, machinesetName).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Wait for machine to go into Deleting phase")
err = wait.Poll(2*time.Second, 30*time.Second, func() (bool, error) {
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].status.phase}").Output()
if output != "Deleting" {
e2e.Logf("machine is not in Deleting phase and waiting up to 2 seconds ...")
return false, nil
}
e2e.Logf("machine is in Deleting phase")
return true, nil
})
exutil.AssertWaitPollNoErr(err, "Check machine phase failed")
g.By("Check machine stuck in Deleting phase because of lifecycle hook")
outDrain, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].status.conditions[0]}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("outDrain:%s", outDrain)
o.Expect(strings.Contains(outDrain, "\"message\":\"Drain operation currently blocked by: [{Name:drain1 Owner:drain-controller1}]\"") && strings.Contains(outDrain, "\"reason\":\"HookPresent\"") && strings.Contains(outDrain, "\"status\":\"False\"") && strings.Contains(outDrain, "\"type\":\"Drainable\"")).To(o.BeTrue())
outTerminate, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].status.conditions[2]}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("outTerminate:%s", outTerminate)
o.Expect(strings.Contains(outTerminate, "\"message\":\"Terminate operation currently blocked by: [{Name:terminate2 Owner:terminate-controller2}]\"") && strings.Contains(outTerminate, "\"reason\":\"HookPresent\"") && strings.Contains(outTerminate, "\"status\":\"False\"") && strings.Contains(outTerminate, "\"type\":\"Terminable\"")).To(o.BeTrue())
g.By("Update machine without lifecycle hook")
machineName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachine, machineName, "-n", "openshift-machine-api", "-p", `[{"op": "remove", "path": "/spec/lifecycleHooks/preDrain"},{"op": "remove", "path": "/spec/lifecycleHooks/preTerminate"}]`, "--type=json").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}) | |||||
test case | openshift/openshift-tests-private | 10dad4e0-fa1b-4dac-a467-234affd13879 | Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-47230-[MDH] Negative lifecycle hook validation [Disruptive] | ['"strconv"', '"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-47230-[MDH] Negative lifecycle hook validation [Disruptive]", func() {
g.By("Create a new machineset")
clusterinfra.SkipConditionally(oc)
machinesetName := infrastructureName + "-47230"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 1}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
machineName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
checkItems := []struct {
patchstr string
errormsg string
}{
{
patchstr: `{"spec":{"lifecycleHooks":{"preTerminate":[{"name":"","owner":"drain-controller1"}]}}}`,
errormsg: "name in body should be at least 3 chars long",
},
{
patchstr: `{"spec":{"lifecycleHooks":{"preDrain":[{"name":"drain1","owner":""}]}}}`,
errormsg: "owner in body should be at least 3 chars long",
},
{
patchstr: `{"spec":{"lifecycleHooks":{"preDrain":[{"name":"drain1","owner":"drain-controller1"},{"name":"drain1","owner":"drain-controller2"}]}}}`,
errormsg: "Duplicate value: map[string]interface {}{\"name\":\"drain1\"}",
},
}
for i, checkItem := range checkItems {
g.By("Update machine with invalid lifecycle hook")
out, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachine, machineName, "-n", "openshift-machine-api", "-p", checkItem.patchstr, "--type=merge").Output()
e2e.Logf("out"+strconv.Itoa(i)+":%s", out)
o.Expect(strings.Contains(out, checkItem.errormsg)).To(o.BeTrue())
}
}) | |||||
test case | openshift/openshift-tests-private | 8a474670-23dd-49c2-a2fa-49bf75ca0293 | Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-44977-Machine with GPU is supported on gcp [Disruptive] | ['"strings"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-44977-Machine with GPU is supported on gcp [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.GCP)
architecture.SkipArchitectures(oc, architecture.ARM64)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-44977"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
arch, err := clusterinfra.GetArchitectureFromMachineSet(oc, machinesetName)
o.Expect(err).NotTo(o.HaveOccurred())
if arch != architecture.AMD64 {
g.Skip("The selected machine set's arch is not amd64, skip this case!")
}
//check supported zone for gpu
zone, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-o=jsonpath={.spec.template.spec.providerSpec.value.zone}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if !strings.Contains(zone, "us-central1-") {
g.Skip("not valid zone for GPU machines")
}
g.By("Update machineset with GPU")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{ "gpus": [ { "count": 1,"type": "nvidia-tesla-p100" }],"machineType":"n1-standard-1", "zone":"us-central1-c", "onHostMaintenance":"Terminate","restartPolicy":"Always"}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check machine with GPU")
gpuType, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.gpus[0].type}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
onHostMaintenance, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.onHostMaintenance}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
restartPolicy, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.restartPolicy}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("gpuType:%s, onHostMaintenance:%s, restartPolicy:%s", gpuType, onHostMaintenance, restartPolicy)
o.Expect(strings.Contains(gpuType, "nvidia-tesla-p100") && strings.Contains(onHostMaintenance, "Terminate") && strings.Contains(restartPolicy, "Always")).To(o.BeTrue())
}) | |||||
test case | openshift/openshift-tests-private | 1f4509c5-4c3a-4d84-b25a-384d09cfcc5c | Author:zhsun-NonHyperShiftHOST-Medium-48363-Machine providerID should be consistent with node providerID | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:zhsun-NonHyperShiftHOST-Medium-48363-Machine providerID should be consistent with node providerID", func() {
g.By("Check machine providerID and node providerID are consistent")
clusterinfra.SkipConditionally(oc)
machineList := clusterinfra.ListAllMachineNames(oc)
for _, machineName := range machineList {
nodeName := clusterinfra.GetNodeNameFromMachine(oc, machineName)
if nodeName == "" {
continue
}
machineProviderID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, machineName, "-o=jsonpath={.spec.providerID}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeProviderID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeName, "-o=jsonpath={.spec.providerID}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(machineProviderID).Should(o.Equal(nodeProviderID))
}
}) | |||||
test case | openshift/openshift-tests-private | cb7120a2-3a35-4b70-bb2c-27c58a31d98a | Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-High-35513-Windows machine should successfully provision for aws [Disruptive] | ['"github.com/openshift/openshift-tests-private/test/extended/util/architecture"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-High-35513-Windows machine should successfully provision for aws [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
architecture.SkipNonAmd64SingleArch(oc)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-35513"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
region, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.aws.region}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
var amiID string
switch region {
case "us-east-1", "us-iso-east-1":
amiID = "ami-0e09e139aca053387"
case "us-east-2":
amiID = "ami-0f4f40c1e7ef56be6"
default:
e2e.Logf("Not support region for the case for now.")
g.Skip("Not support region for the case for now.")
}
g.By("Update machineset with windows ami")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"metadata":{"labels":{"machine.openshift.io/os-id": "Windows"}},"spec":{"providerSpec":{"value":{"ami":{"id":"`+amiID+`"}}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachineProvisioned(oc, machinesetName)
}) | |||||
test case | openshift/openshift-tests-private | b4405dca-50d2-4400-a005-3806be83b260 | Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-48012-Change AWS EBS GP3 IOPS in MachineSet should take affect on aws [Disruptive] | ['"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-48012-Change AWS EBS GP3 IOPS in MachineSet should take affect on aws [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
clusterinfra.SkipForAwsOutpostCluster(oc)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-48012"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Update machineset with gp3 iops 5000")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"blockDevices":[{"ebs":{"volumeType":"gp3","iops":5000}}]}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check on aws instance with gp3 iops 5000")
instanceID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-o=jsonpath={.items[0].status.providerStatus.instanceId}", "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName).Output()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.GetAwsCredentialFromCluster(oc)
volumeInfo, err := clusterinfra.GetAwsVolumeInfoAttachedToInstanceID(instanceID)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("volumeInfo:%s", volumeInfo)
o.Expect(strings.Contains(volumeInfo, "\"Iops\":5000") && strings.Contains(volumeInfo, "\"VolumeType\":\"gp3\"")).To(o.BeTrue())
}) | |||||
test case | openshift/openshift-tests-private | 1032bfef-4820-44ce-9c02-fb6a523ea578 | Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-High-33040-Required configuration should be added to the ProviderSpec to enable spot instances - azure [Disruptive] | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-High-33040-Required configuration should be added to the ProviderSpec to enable spot instances - azure [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure)
randomMachinesetName := clusterinfra.GetRandomMachineSetName(oc)
region, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", "openshift-machine-api", "-o=jsonpath={.spec.template.spec.providerSpec.value.location}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if region == "northcentralus" || region == "westus" || region == "usgovtexas" {
g.Skip("Skip this test scenario because it is not supported on the " + region + " region, because this region doesn't have zones")
}
g.By("Create a spot instance on azure")
machinesetName := infrastructureName + "-33040"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"spotVMOptions":{}}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check machine and node were labelled as an `interruptible-instance`")
machine, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", machineAPINamespace, "-l", "machine.openshift.io/interruptible-instance=").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(machine).NotTo(o.BeEmpty())
node, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-n", machineAPINamespace, "-l", "machine.openshift.io/interruptible-instance=").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(node).NotTo(o.BeEmpty())
}) | |||||
test case | openshift/openshift-tests-private | 7d73effc-fd6d-46ff-a412-d00fb46329d1 | Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-48594-AWS EFA network interfaces should be supported via machine api [Disruptive] | ['"github.com/openshift/openshift-tests-private/test/extended/util/architecture"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-48594-AWS EFA network interfaces should be supported via machine api [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
architecture.SkipNonAmd64SingleArch(oc)
clusterinfra.SkipForAwsOutpostCluster(oc)
region, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.aws.region}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if region != "us-east-2" && region != "us-east-1" {
g.Skip("Not support region " + region + " for the case for now.")
}
g.By("Create a new machineset")
machinesetName := infrastructureName + "-48594"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Update machineset with networkInterfaceType: EFA")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"networkInterfaceType":"EFA","instanceType":"m5dn.24xlarge"}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check machine with networkInterfaceType: EFA")
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.networkInterfaceType}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("out:%s", out)
o.Expect(out).Should(o.Equal("EFA"))
}) | |||||
test case | openshift/openshift-tests-private | 8554f077-fc8e-4f03-9106-f1aa72c0790f | Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-48595-Negative validation for AWS NetworkInterfaceType [Disruptive] | ['"strings"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-48595-Negative validation for AWS NetworkInterfaceType [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
architecture.SkipNonAmd64SingleArch(oc)
region, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.aws.region}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if region != "us-east-2" && region != "us-east-1" {
g.Skip("Not support region " + region + " for the case for now.")
}
g.By("Create a new machineset")
machinesetName := infrastructureName + "-48595"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Update machineset with networkInterfaceType: invalid")
out, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"networkInterfaceType":"invalid","instanceType":"m5dn.24xlarge"}}}}}}`, "--type=merge").Output()
o.Expect(strings.Contains(out, "Invalid value")).To(o.BeTrue())
g.By("Update machineset with not supported instance types")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"networkInterfaceType":"EFA","instanceType":"m6i.xlarge"}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachineFailed(oc, machinesetName)
out, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].status.errorMessage}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("out:%s", out)
o.Expect(strings.Contains(out, "not supported")).To(o.BeTrue())
}) | |||||
test case | openshift/openshift-tests-private | f2699bd6-d80b-4292-9fa6-6902af44c751 | Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-49827-Ensure pd-balanced disk is supported on GCP via machine api [Disruptive] | ['"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-49827-Ensure pd-balanced disk is supported on GCP via machine api [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.GCP)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-49827"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Update machineset with invalid disk type")
out, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `[{"op":"replace","path":"/spec/template/spec/providerSpec/value/disks/0/type","value":"invalid"}]`, "--type=json").Output()
o.Expect(strings.Contains(out, "Unsupported value")).To(o.BeTrue())
g.By("Update machineset with pd-balanced disk type")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `[{"op":"replace","path":"/spec/replicas","value": 1},{"op":"replace","path":"/spec/template/spec/providerSpec/value/disks/0/type","value":"pd-balanced"}]`, "--type=json").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check machine with pd-balanced disk type")
out, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.disks[0].type}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("out:%s", out)
o.Expect(out).Should(o.Equal("pd-balanced"))
}) | |||||
test case | openshift/openshift-tests-private | 6d793a4b-7ffe-403a-8eed-989dc50e89da | Author:zhsun-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-50731-Enable IMDSv2 on existing worker machines via machine set [Disruptive][Slow] | ['"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:zhsun-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-50731-Enable IMDSv2 on existing worker machines via machine set [Disruptive][Slow]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-50731"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Update machineset with imds required")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"metadataServiceOptions":{"authentication":"Required"}}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", machineAPINamespace, "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.metadataServiceOptions.authentication}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("out:%s", out)
o.Expect(out).Should(o.ContainSubstring("Required"))
g.By("Update machineset with imds optional")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"metadataServiceOptions":{"authentication":"Optional"}}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
machineName := clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)[0]
oc.AsAdmin().WithoutNamespace().Run("delete").Args(mapiMachine, machineName, "-n", machineAPINamespace).Execute()
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
out, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", machineAPINamespace, "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[*].spec.providerSpec.value.metadataServiceOptions.authentication}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).Should(o.ContainSubstring("Optional"))
g.By("Update machine with invalid authentication ")
out, _ = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"metadataServiceOptions":{"authentication":"invalid"}}}}}}}`, "--type=merge").Output()
o.Expect(strings.Contains(out, "Invalid value: \"invalid\": Allowed values are either 'Optional' or 'Required'")).To(o.BeTrue())
}) | |||||
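The webhook message asserted above only allows "Optional" or "Required" for metadataServiceOptions.authentication. A small illustrative sketch of that rule, assuming nothing beyond the error text quoted in the test:

package main

import (
	"errors"
	"fmt"
)

// validateIMDSAuth mirrors the allowed-values check described by the webhook error.
func validateIMDSAuth(value string) error {
	switch value {
	case "Optional", "Required":
		return nil
	default:
		return errors.New("allowed values are either 'Optional' or 'Required'")
	}
}

func main() {
	for _, v := range []string{"Required", "Optional", "invalid"} {
		fmt.Printf("%-8s -> %v\n", v, validateIMDSAuth(v))
	}
}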
test case | openshift/openshift-tests-private | 411545fb-64ac-4284-bf04-858f56de8277 | Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-37915-Creating machines using KMS keys from AWS [Disruptive] | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-37915-Creating machines using KMS keys from AWS [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
clusterinfra.SkipForAwsOutpostCluster(oc)
region, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.aws.region}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
kmsClient := exutil.NewKMSClient(region)
key, err := kmsClient.CreateKey(infrastructureName + " key 37915")
if err != nil {
g.Skip("Create key failed, skip the cases!!")
}
defer func() {
err := kmsClient.DeleteKey(key)
o.Expect(err).NotTo(o.HaveOccurred())
}()
g.By("Create a new machineset")
machinesetName := infrastructureName + "-37915"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"blockDevices": [{"ebs":{"encrypted":true,"iops":0,"kmsKey":{"arn":"`+key+`"},"volumeSize":120,"volumeType":"gp2"}}]}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check machine with KMS keys")
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.blockDevices[0].ebs.kmsKey.arn}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).Should(o.ContainSubstring("arn:aws:kms"))
}) | |||||
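For readability, the blockDevices merge patch used above can be expressed with typed structs before marshalling. A minimal sketch under that assumption; the KMS ARN below is a placeholder, not a real key:

package main

import (
	"encoding/json"
	"fmt"
)

// ebsSpec mirrors the fields the test sets on the EBS block device.
type ebsSpec struct {
	Encrypted  bool              `json:"encrypted"`
	IOPS       int               `json:"iops"`
	KMSKey     map[string]string `json:"kmsKey"`
	VolumeSize int               `json:"volumeSize"`
	VolumeType string            `json:"volumeType"`
}

func main() {
	patch := map[string]interface{}{
		"spec": map[string]interface{}{
			"replicas": 1,
			"template": map[string]interface{}{
				"spec": map[string]interface{}{
					"providerSpec": map[string]interface{}{
						"value": map[string]interface{}{
							"blockDevices": []map[string]ebsSpec{
								{"ebs": {Encrypted: true, IOPS: 0, KMSKey: map[string]string{"arn": "arn:aws:kms:region:account:key/placeholder"}, VolumeSize: 120, VolumeType: "gp2"}},
							},
						},
					},
				},
			},
		},
	}
	out, _ := json.MarshalIndent(patch, "", "  ")
	fmt.Println(string(out))
}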
test case | openshift/openshift-tests-private | 1c7c53bb-57aa-403e-9494-1ef5fb1ea2be | Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-52471-Enable configuration of boot diagnostics when creating VMs on azure [Disruptive] | ['"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-52471-Enable configuration of boot diagnostics when creating VMs on azure [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure)
g.By("Create a machineset configuring boot diagnostics with Azure managed storage accounts")
machinesetName := infrastructureName + "-52471-1"
ms1 := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms1.DeleteMachineSet(oc)
ms1.CreateMachineSet(oc)
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"diagnostics":{"boot":{"storageAccountType":"AzureManaged"}}}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", machineAPINamespace, "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.diagnostics.boot.storageAccountType}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).To(o.ContainSubstring("AzureManaged"))
g.By("Create machineset configuring boot diagnostics with Customer managed storage accounts")
machinesetName = infrastructureName + "-52471-2"
ms2 := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms2.DeleteMachineSet(oc)
ms2.CreateMachineSet(oc)
storageAccount, _, err1 := exutil.GetAzureStorageAccountFromCluster(oc)
o.Expect(err1).NotTo(o.HaveOccurred())
cloudName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.azure.cloudName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
storageAccountURISuffix := ".blob.core.windows.net/"
if strings.ToLower(cloudName) == "azureusgovernmentcloud" {
storageAccountURISuffix = ".blob.core.usgovcloudapi.net/"
}
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"diagnostics":{"boot":{"storageAccountType":"CustomerManaged","customerManaged":{"storageAccountURI":"https://`+storageAccount+storageAccountURISuffix+`"}}}}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
out, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", machineAPINamespace, "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.diagnostics.boot.storageAccountType}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).To(o.ContainSubstring("CustomerManaged"))
}) | |||||
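The cloud-dependent blob endpoint selection in the step above can be isolated into a small helper. A sketch that only distinguishes the two clouds the test handles; the storage account name is a placeholder:

package main

import (
	"fmt"
	"strings"
)

// storageAccountURI picks the blob endpoint suffix by Azure cloud name, as the test does.
func storageAccountURI(storageAccount, cloudName string) string {
	suffix := ".blob.core.windows.net/"
	if strings.EqualFold(cloudName, "AzureUSGovernmentCloud") {
		suffix = ".blob.core.usgovcloudapi.net/"
	}
	return "https://" + storageAccount + suffix
}

func main() {
	fmt.Println(storageAccountURI("clusterqa2ob", "AzurePublicCloud"))
	fmt.Println(storageAccountURI("clusterqa2ob", "AzureUSGovernmentCloud"))
}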
test case | openshift/openshift-tests-private | df9827b4-31e6-478f-b3cd-f7d03ab03f8e | Author:zhsun-NonHyperShiftHOST-Medium-52473-Webhook validations for azure boot diagnostics [Disruptive] | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:zhsun-NonHyperShiftHOST-Medium-52473-Webhook validations for azure boot diagnostics [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure)
g.By("Create a machineset")
machinesetName := infrastructureName + "-52473-1"
ms1 := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms1.DeleteMachineSet(oc)
ms1.CreateMachineSet(oc)
g.By("Update machineset with invalid storage account type")
out, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"diagnostics":{"boot":{"storageAccountType":"AzureManaged-invalid"}}}}}}}}`, "--type=merge").Output()
o.Expect(out).To(o.ContainSubstring("storageAccountType must be one of: AzureManaged, CustomerManaged"))
g.By("Update machineset with Customer Managed boot diagnostics, with a missing storage account URI")
out, _ = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"diagnostics":{"boot":{"storageAccountType":"CustomerManaged"}}}}}}}}`, "--type=merge").Output()
o.Expect(out).To(o.ContainSubstring("customerManaged configuration must be provided"))
g.By("Update machineset Azure managed storage accounts")
out, _ = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"diagnostics":{"boot":{"storageAccountType":"AzureManaged","customerManaged":{"storageAccountURI":"https://clusterqa2ob.blob.core.windows.net"}}}}}}}}}`, "--type=merge").Output()
o.Expect(out).To(o.ContainSubstring("customerManaged may not be set when type is AzureManaged"))
g.By("Update machineset with invalid storageAccountURI")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"diagnostics":{"boot":{"storageAccountType":"CustomerManaged","customerManaged":{"storageAccountURI":"https://clusterqa2ob.blob.core.windows.net.invalid"}}}}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachineFailed(oc, machinesetName)
g.By("Update machineset with invalid storage account")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-p", `{"spec":{"replicas":2,"template":{"spec":{"providerSpec":{"value":{"diagnostics":{"boot":{"storageAccountType":"CustomerManaged","customerManaged":{"storageAccountURI":"https://invalid.blob.core.windows.net"}}}}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachineFailed(oc, machinesetName)
}) | |||||
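The webhook messages checked above amount to a small set of rules on the diagnostics.boot block. The following is an illustrative sketch of those rules, not the actual machine-api webhook code; field names simply mirror the providerSpec:

package main

import (
	"errors"
	"fmt"
)

type bootDiagnostics struct {
	StorageAccountType string
	CustomerManagedURI string
}

// validate reproduces the constraints described by the webhook error messages.
func validate(b bootDiagnostics) error {
	switch b.StorageAccountType {
	case "AzureManaged":
		if b.CustomerManagedURI != "" {
			return errors.New("customerManaged may not be set when type is AzureManaged")
		}
	case "CustomerManaged":
		if b.CustomerManagedURI == "" {
			return errors.New("customerManaged configuration must be provided")
		}
	default:
		return errors.New("storageAccountType must be one of: AzureManaged, CustomerManaged")
	}
	return nil
}

func main() {
	fmt.Println(validate(bootDiagnostics{StorageAccountType: "AzureManaged-invalid"}))
	fmt.Println(validate(bootDiagnostics{StorageAccountType: "CustomerManaged"}))
	fmt.Println(validate(bootDiagnostics{StorageAccountType: "AzureManaged", CustomerManagedURI: "https://example.blob.core.windows.net"}))
}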
test case | openshift/openshift-tests-private | bf39dca4-d83a-4ade-b6aa-760ee1700de5 | Author:miyadav-NonHyperShiftHOST-Low-36489-Machineset creation when publicIP:true in disconnected or normal (stratergy private or public) azure,aws,gcp enviroment [Disruptive] | ['"fmt"', '"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:miyadav-NonHyperShiftHOST-Low-36489-Machineset creation when publicIP:true in disconnected or normal (stratergy private or public) azure,aws,gcp enviroment [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure, clusterinfra.AWS, clusterinfra.GCP)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-36489"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
iaasPlatform := clusterinfra.CheckPlatform(oc)
publicZone, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("dns", "cluster", "-n", "openshift-dns", "-o=jsonpath={.spec.publicZone}").Output()
if err != nil {
g.Fail("Issue with dns setup")
}
g.By("Update machineset with publicIP: true")
switch iaasPlatform {
case clusterinfra.AWS:
msg, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-p", `{"spec":{"template":{"spec":{"providerSpec":{"value":{"publicIP": true}}}}}}`, "--type=merge").Output()
if publicZone == "" && iaasPlatform == clusterinfra.Azure {
o.Expect(msg).To(o.ContainSubstring("publicIP is not allowed in Azure disconnected installation with publish strategy as internal"))
} else {
o.Expect(err).NotTo(o.HaveOccurred())
//to scale up machineset with publicIP: true
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-p", `{"spec":{"replicas": 1}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
}
case clusterinfra.Azure:
msg, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-p", `{"spec":{"template":{"spec":{"providerSpec":{"value":{"publicIP": true}}}}}}`, "--type=merge").Output()
if publicZone == "" && iaasPlatform == clusterinfra.Azure {
o.Expect(msg).To(o.ContainSubstring("publicIP is not allowed in Azure disconnected installation with publish strategy as internal"))
} else {
o.Expect(err).NotTo(o.HaveOccurred())
//to scale up machineset with publicIP: true
//Azure limitation: with capi installation, VMs whose public IpConfigurations sit behind OutboundRules cannot provision a publicIp
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-p", `{"spec":{"replicas": 1}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
machineName := clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)[0]
g.By("Check machineset with publicIP: true is not allowed for Azure")
status, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args(mapiMachine, machineName, "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(status, "NicWithPublicIpCannotReferencePoolWithOutboundRule")).To(o.BeTrue())
}
case clusterinfra.GCP:
network, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.networkInterfaces[0].network}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
subnetwork, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.networkInterfaces[0].subnetwork}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
patchString := fmt.Sprintf(`{"spec":{"template":{"spec":{"providerSpec":{"value":{"networkInterfaces":[{"network":"%s","subnetwork":"%s","publicIP": true}]}}}},"replicas":1}}`, network, subnetwork)
_, err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-p", patchString, "--type=merge").Output()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
}
}) | |||||
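In the GCP branch above the patch is assembled with fmt.Sprintf; the same payload can be built with encoding/json to avoid quoting mistakes. A minimal sketch with placeholder network and subnetwork names:

package main

import (
	"encoding/json"
	"fmt"
)

// gcpNIC mirrors the networkInterfaces entry the test patches in.
type gcpNIC struct {
	Network    string `json:"network"`
	Subnetwork string `json:"subnetwork"`
	PublicIP   bool   `json:"publicIP"`
}

func main() {
	patch := map[string]interface{}{
		"spec": map[string]interface{}{
			"replicas": 1,
			"template": map[string]interface{}{
				"spec": map[string]interface{}{
					"providerSpec": map[string]interface{}{
						"value": map[string]interface{}{
							"networkInterfaces": []gcpNIC{{Network: "example-network", Subnetwork: "example-subnet", PublicIP: true}},
						},
					},
				},
			},
		},
	}
	out, _ := json.Marshal(patch)
	fmt.Println(string(out))
}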
test case | openshift/openshift-tests-private | 3d8f1a43-fe57-47b8-b66b-29b43c5dd278 | Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-51013-machine api should issue client cert when AWS DNS suffix missing [Disruptive] | ['"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-51013-machine api should issue client cert when AWS DNS suffix missing [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
g.By("Create a new dhcpOptions")
var newDhcpOptionsID, currentDhcpOptionsID string
clusterinfra.GetAwsCredentialFromCluster(oc)
awsClient := exutil.InitAwsSession()
newDhcpOptionsID, err := awsClient.CreateDhcpOptions()
if err != nil {
g.Skip("The credential is insufficient to perform create dhcpOptions operation, skip the cases!!")
}
defer awsClient.DeleteDhcpOptions(newDhcpOptionsID)
g.By("Associate the VPC with the new dhcpOptionsId")
machineName := clusterinfra.ListMasterMachineNames(oc)[0]
instanceID, err := awsClient.GetAwsInstanceID(machineName)
o.Expect(err).NotTo(o.HaveOccurred())
vpcID, err := awsClient.GetAwsInstanceVPCId(instanceID)
o.Expect(err).NotTo(o.HaveOccurred())
currentDhcpOptionsID, err = awsClient.GetDhcpOptionsIDOfVpc(vpcID)
o.Expect(err).NotTo(o.HaveOccurred())
defer awsClient.AssociateDhcpOptions(vpcID, currentDhcpOptionsID)
err = awsClient.AssociateDhcpOptions(vpcID, newDhcpOptionsID)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create a new machineset")
machinesetName := infrastructureName + "-51013"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 1}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
nodeName := clusterinfra.GetNodeNameFromMachine(oc, clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)[0])
e2e.Logf("nodeName:%s", nodeName)
o.Expect(strings.HasPrefix(nodeName, "ip-")).To(o.BeTrue())
}) | |||||
test case | openshift/openshift-tests-private | 3a475d5c-b597-42ca-9589-6b6b19e655bd | Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-59718-[Nutanix] Support bootType categories and project fields of NutanixMachineProviderConfig [Disruptive] | ['"fmt"', '"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-59718-[Nutanix] Support bootType categories and project fields of NutanixMachineProviderConfig [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Nutanix)
// skip zones other than Development-LTS
zones, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-o=jsonpath={.items[*].metadata.labels.machine\\.openshift\\.io\\/zone}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if !strings.Contains(zones, "Development-LTS") {
g.Skip(fmt.Sprintf("this case can be only run in Development-LTS zone, but is's %s", zones))
}
g.By("Create a new machineset")
machinesetName := infrastructureName + "-59718"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Update machineset adding these new fields")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"bootType":"Legacy","categories":[{"key":"AppType","value":"Kubernetes"},{"key":"Environment","value":"Testing"}],"project":{"type":"name","name":"qe-project"}}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check machine with these new fields")
bootType, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.bootType}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
categories, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.categories}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
projectName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.project.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("bootType:%s, categories:%s, projectName:%s", bootType, categories, projectName)
o.Expect(strings.Contains(bootType, "Legacy") && strings.Contains(categories, "Kubernetes") && strings.Contains(categories, "Testing") && strings.Contains(projectName, "qe-project")).To(o.BeTrue())
}) | |||||
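The Nutanix fields patched in above (bootType, categories, project) can be modelled as Go structs before marshalling. A sketch under that assumption; the project name is the same placeholder value the test uses:

package main

import (
	"encoding/json"
	"fmt"
)

type nutanixCategory struct {
	Key   string `json:"key"`
	Value string `json:"value"`
}

type nutanixProject struct {
	Type string `json:"type"`
	Name string `json:"name"`
}

// nutanixExtras mirrors the new providerSpec fields exercised by the test.
type nutanixExtras struct {
	BootType   string            `json:"bootType"`
	Categories []nutanixCategory `json:"categories"`
	Project    nutanixProject    `json:"project"`
}

func main() {
	extras := nutanixExtras{
		BootType: "Legacy",
		Categories: []nutanixCategory{
			{Key: "AppType", Value: "Kubernetes"},
			{Key: "Environment", Value: "Testing"},
		},
		Project: nutanixProject{Type: "name", Name: "qe-project"},
	}
	out, _ := json.MarshalIndent(extras, "", "  ")
	fmt.Println(string(out))
}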
test case | openshift/openshift-tests-private | 63e375e1-0c9d-4cc7-94a2-d959677825e5 | Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-59760-Create confidential compute VMs on GCP [Disruptive] | ['"strings"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-59760-Create confidential compute VMs on GCP [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.GCP)
//We should enable this case when Google provides this support for their ARM machines
//https://issues.redhat.com/browse/OCPQE-22305
architecture.SkipArchitectures(oc, architecture.ARM64)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-59760"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
arch, err := clusterinfra.GetArchitectureFromMachineSet(oc, machinesetName)
o.Expect(err).NotTo(o.HaveOccurred())
if arch != architecture.AMD64 {
g.Skip("The selected machine set's arch is not amd64, skip this case!")
}
g.By("Update machineset with confidential compute options")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"machineType":"`+clusterinfra.GetInstanceTypeValuesByProviderAndArch(clusterinfra.GCP, arch)[1]+`","onHostMaintenance":"Terminate","confidentialCompute":"Enabled"}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check machine with confidentialCompute enabled")
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.confidentialCompute}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).Should(o.Equal("Enabled"))
g.By("Validate onHostMaintenance should be set to terminate in case confidential compute is enabled")
out, _ = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"machineType":"`+clusterinfra.GetInstanceTypeValuesByProviderAndArch(clusterinfra.GCP, arch)[1]+`","onHostMaintenance":"invalid","confidentialCompute":"Enabled"}}}}}}`, "--type=merge").Output()
o.Expect(strings.Contains(out, "Invalid value: \"invalid\": onHostMaintenance must be either Migrate or Terminate")).To(o.BeTrue())
out, _ = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"machineType":"`+clusterinfra.GetInstanceTypeValuesByProviderAndArch(clusterinfra.GCP, arch)[1]+`","onHostMaintenance":"Migrate","confidentialCompute":"Enabled"}}}}}}`, "--type=merge").Output()
o.Expect(strings.Contains(out, "Invalid value: \"Migrate\": ConfidentialCompute require OnHostMaintenance to be set to Terminate, the current value is: Migrate")).To(o.BeTrue())
g.By("Validate the instance type support confidential computing")
out, _ = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"machineType":"`+clusterinfra.GetInstanceTypeValuesByProviderAndArch(clusterinfra.GCP, arch)[0]+`","onHostMaintenance":"Terminate","confidentialCompute":"Enabled"}}}}}}`, "--type=merge").Output()
o.Expect(strings.Contains(out, "Invalid value: \""+clusterinfra.GetInstanceTypeValuesByProviderAndArch(clusterinfra.GCP, arch)[0]+"\": ConfidentialCompute require machine type in the following series: n2d,c2d")).To(o.BeTrue())
}) | |||||
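The webhook messages asserted above encode the confidential-compute constraints: onHostMaintenance must be Terminate and the machine type must be in the n2d or c2d series. An illustrative sketch of those constraints, not the actual webhook implementation:

package main

import (
	"errors"
	"fmt"
	"strings"
)

// validateConfidentialCompute reproduces the checks described by the webhook errors.
func validateConfidentialCompute(machineType, onHostMaintenance string) error {
	if onHostMaintenance != "Terminate" {
		return fmt.Errorf("ConfidentialCompute require OnHostMaintenance to be set to Terminate, the current value is: %s", onHostMaintenance)
	}
	series := strings.SplitN(machineType, "-", 2)[0]
	if series != "n2d" && series != "c2d" {
		return errors.New("ConfidentialCompute require machine type in the following series: n2d,c2d")
	}
	return nil
}

func main() {
	fmt.Println(validateConfidentialCompute("n2d-standard-4", "Terminate"))
	fmt.Println(validateConfidentialCompute("n2d-standard-4", "Migrate"))
	fmt.Println(validateConfidentialCompute("e2-medium", "Terminate"))
}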
test case | openshift/openshift-tests-private | e985a47f-a90f-427f-9ec2-f7a37fb9261d | Author:miyadav-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-57438-Add support to Shielded VMs on GCP [Disruptive] | ['"strings"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:miyadav-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-57438-Add support to Shielded VMs on GCP [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.GCP)
//We should enable this case when the bug is fixed
//https://issues.redhat.com/browse/OCPBUGS-17904
architecture.SkipArchitectures(oc, architecture.ARM64)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-57438"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
arch, err := clusterinfra.GetArchitectureFromMachineSet(oc, machinesetName)
o.Expect(err).NotTo(o.HaveOccurred())
if arch != architecture.AMD64 {
g.Skip("The selected machine set's arch is not amd64, skip this case!")
}
g.By("Update machineset with shieldedInstanceConfig compute options Enabled")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"shieldedInstanceConfig": {"secureBoot": "Enabled","integrityMonitoring": "Enabled","virtualizedTrustedPlatformModule": "Enabled"}}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check machine with shieldedInstanceConfig options enabled")
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.shieldedInstanceConfig}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).Should(o.Equal("{\"integrityMonitoring\":\"Enabled\",\"secureBoot\":\"Enabled\",\"virtualizedTrustedPlatformModule\":\"Enabled\"}"))
g.By("Validate the webhooks warns with invalid values of shieldedVM config")
out, _ = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"shieldedInstanceConfig": {"secureBoot": "nabled","integrityMonitoring": "Enabled","virtualizedTrustedPlatformModule": "Enabled"}}}}}}}`, "--type=merge").Output()
o.Expect(strings.Contains(out, "secureBoot must be either Enabled or Disabled")).To(o.BeTrue())
}) | |||||
test case | openshift/openshift-tests-private | 683f1d9b-a5e0-4689-9e2e-fb43eb33b4ff | Author:miyadav-NonHyperShiftHOST-Longduration-NonPreRelease-High-48464-Dedicated tenancy should be exposed on aws providerspec [Disruptive] | ['"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:miyadav-NonHyperShiftHOST-Longduration-NonPreRelease-High-48464-Dedicated tenancy should be exposed on aws providerspec [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
clusterinfra.SkipForAwsOutpostCluster(oc)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-48464"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Update machineset to have dedicated tenancy ")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"placement": {"tenancy": "dedicated"}}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check machine available with dedicated tenancy ")
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.placement.tenancy}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).Should(o.Equal("dedicated"))
g.By("Validate the webhooks warns with invalid values of tenancy config")
out, _ = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"placement": {"tenancy": "invalid"}}}}}}}`, "--type=merge").Output()
o.Expect(strings.Contains(out, "Invalid providerSpec.tenancy, the only allowed options are: default, dedicated, host")).To(o.BeTrue())
}) | |||||
test case | openshift/openshift-tests-private | 3311af35-9df8-45fb-bd61-2a68a2c51662 | Author:miyadav-NonHyperShiftHOST-Longduration-NonPreRelease-High-39639-host-based disk encryption at VM on azure platform [Disruptive] | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:miyadav-NonHyperShiftHOST-Longduration-NonPreRelease-High-39639-host-based disk encryption at VM on azure platform [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-39639"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Update machineset to have encryption at host enabled ")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"securityProfile": {"encryptionAtHost": true}}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check machine available with encrytption enabled ")
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.securityProfile}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).Should(o.ContainSubstring("{\"encryptionAtHost\":true"))
}) | |||||
test case | openshift/openshift-tests-private | 5e62f7ca-1bb7-4d50-a3fd-edbc30b3dba9 | Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-32269-Implement validation/defaulting for AWS [Disruptive] | ['"path/filepath"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-32269-Implement validation/defaulting for AWS [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
mapiBaseDir := exutil.FixturePath("testdata", "clusterinfrastructure", "mapi")
defaultMachinesetAwsTemplate := filepath.Join(mapiBaseDir, "default-machineset-aws.yaml")
clusterID := clusterinfra.GetInfrastructureName(oc)
masterArchtype := architecture.GetControlPlaneArch(oc)
randomMachinesetName := clusterinfra.GetRandomMachineSetName(oc)
msArchtype, err := clusterinfra.GetArchitectureFromMachineSet(oc, randomMachinesetName)
o.Expect(err).NotTo(o.HaveOccurred())
if masterArchtype != msArchtype {
g.Skip("The selected machine set's arch is not the same with the master machine's arch, skip this case!")
}
amiID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.ami.id}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
availabilityZone, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.placement.availabilityZone}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
sgName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.securityGroups[0].filters[0].values[0]}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
subnet, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.subnet.filters[0].values[0]}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if subnet == "" {
subnet, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.subnet.id}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
defaultMachinesetAwsTemplate = filepath.Join(mapiBaseDir, "default-machineset-aws-id.yaml")
}
iamInstanceProfileID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.iamInstanceProfile.id}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
defaultMachinesetAws := defaultMachinesetAwsDescription{
name: infrastructureName + "-32269-default",
clustername: clusterID,
template: defaultMachinesetAwsTemplate,
amiID: amiID,
availabilityZone: availabilityZone,
sgName: sgName,
subnet: subnet,
namespace: machineAPINamespace,
iamInstanceProfileID: iamInstanceProfileID,
}
defer clusterinfra.WaitForMachinesDisapper(oc, defaultMachinesetAws.name)
defer defaultMachinesetAws.deleteDefaultMachineSetOnAws(oc)
defaultMachinesetAws.createDefaultMachineSetOnAws(oc)
instanceTypeMachine, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+defaultMachinesetAws.name, "-o=jsonpath={.items[0].spec.providerSpec.value.instanceType}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
switch arch := architecture.ClusterArchitecture(oc); arch {
case architecture.AMD64:
o.Expect(instanceTypeMachine).Should(o.Equal("m5.large"))
case architecture.ARM64:
o.Expect(instanceTypeMachine).Should(o.Equal("m6g.large"))
default:
e2e.Logf("ignoring the validation of the instanceType for cluster architecture %s", arch.String())
}
}) | |||||
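The architecture switch at the end of the test above expects a fixed default instance type per architecture. A small sketch of that mapping, assuming only the two values the assertions actually check:

package main

import "fmt"

// expectedDefaultInstanceType returns the default the test validates per architecture;
// other architectures are intentionally left unvalidated, as in the test.
func expectedDefaultInstanceType(arch string) (string, bool) {
	switch arch {
	case "amd64":
		return "m5.large", true
	case "arm64":
		return "m6g.large", true
	default:
		return "", false
	}
}

func main() {
	for _, arch := range []string{"amd64", "arm64", "ppc64le"} {
		if t, ok := expectedDefaultInstanceType(arch); ok {
			fmt.Printf("%s -> %s\n", arch, t)
		} else {
			fmt.Printf("%s -> no default validated\n", arch)
		}
	}
}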
test case | openshift/openshift-tests-private | e61187cf-827f-424f-ba65-8c926ff4d8ca | Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-37497-ClusterInfrastructure Dedicated Spot Instances could be created [Disruptive] | ['"github.com/openshift/openshift-tests-private/test/extended/util/architecture"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-37497-ClusterInfrastructure Dedicated Spot Instances could be created [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
architecture.SkipNonAmd64SingleArch(oc)
clusterinfra.SkipForAwsOutpostCluster(oc)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-37497"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Update machineset to Dedicated Spot Instances")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"spotMarketOptions":{},"instanceType":"c4.8xlarge","placement": {"tenancy": "dedicated"}}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
}) | |||||
test case | openshift/openshift-tests-private | 331bf4bd-4bc0-46ab-bc2e-75263a57f053 | Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-64909-AWS Placement group support for MAPI [Disruptive] | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-64909-AWS Placement group support for MAPI [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
region, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.aws.region}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if region != "us-east-2" && region != "us-east-1" {
g.Skip("Not support region " + region + " for the case for now.")
}
clusterinfra.GetAwsCredentialFromCluster(oc)
awsClient := exutil.InitAwsSession()
_, err = awsClient.GetPlacementGroupByName("pgcluster")
if err != nil {
g.Skip("There is no this placement group for testing, skip the cases!!")
}
g.By("Create a new machineset")
machinesetName := infrastructureName + "-64909"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
availabilityZone, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-o=jsonpath={.spec.template.spec.providerSpec.value.placement.availabilityZone}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if availabilityZone != "us-east-2b" && availabilityZone != "us-east-1b" {
g.Skip("Restricted to b availabilityZone testing because cluster placement group cannot span zones. But it's " + availabilityZone)
}
g.By("Update machineset with Placement group")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"placementGroupName":"pgcluster"}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check machine with Placement group")
placementGroupName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.placementGroupName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("placementGroupName:%s", placementGroupName)
o.Expect(placementGroupName).Should(o.Equal("pgcluster"))
}) | |||||
test case | openshift/openshift-tests-private | a970db55-e957-45ed-937f-a0a93be6a707 | Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-LEVEL0-Critical-25436-Scale up/scale down the cluster by changing the replicas of the machineSet [Disruptive][Slow] | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-LEVEL0-Critical-25436-Scale up/scale down the cluster by changing the replicas of the machineSet [Disruptive][Slow]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP, clusterinfra.VSphere, clusterinfra.IBMCloud, clusterinfra.AlibabaCloud, clusterinfra.Nutanix, clusterinfra.OpenStack, clusterinfra.Ovirt)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-25436g"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Scale up machineset")
clusterinfra.ScaleMachineSet(oc, machinesetName, 1)
g.By("Scale down machineset")
clusterinfra.ScaleMachineSet(oc, machinesetName, 0)
}) | |||||
test case | openshift/openshift-tests-private | 40d204cd-99a0-4f69-b24f-41ed5603ab74 | Author:dtobolik-NonHyperShiftHOST-NonPreRelease-Medium-66866-AWS machineset support for multiple AWS security groups [Disruptive][Slow] | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:dtobolik-NonHyperShiftHOST-NonPreRelease-Medium-66866-AWS machineset support for multiple AWS security groups [Disruptive][Slow]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
g.By("Create aws security group")
clusterinfra.GetAwsCredentialFromCluster(oc)
awsClient := exutil.InitAwsSession()
randomMachineName := clusterinfra.ListWorkerMachineNames(oc)[0]
randomInstanceID, err := awsClient.GetAwsInstanceID(randomMachineName)
o.Expect(err).NotTo(o.HaveOccurred())
vpcID, err := awsClient.GetAwsInstanceVPCId(randomInstanceID)
o.Expect(err).NotTo(o.HaveOccurred())
sgName := "ocp-66866-sg"
sgID, err := awsClient.CreateSecurityGroup(sgName, vpcID, "ocp-66866 testing security group")
o.Expect(err).NotTo(o.HaveOccurred())
defer awsClient.DeleteSecurityGroup(sgID)
err = awsClient.CreateTag(sgID, "Name", sgName)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create a new machineset")
machineSetName := infrastructureName + "-66866"
machineSet := clusterinfra.MachineSetDescription{Name: machineSetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machineSetName)
defer machineSet.DeleteMachineSet(oc)
machineSet.CreateMachineSet(oc)
g.By("Add security group to machineset")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machineSetName, "-n", "openshift-machine-api", "-p", `[{"op":"replace","path":"/spec/replicas","value":1},{"op":"add","path":"/spec/template/spec/providerSpec/value/securityGroups/-","value":{"filters":[{"name":"tag:Name","values":["`+sgName+`"]}]}}]`, "--type=json").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machineSetName)
g.By("Check security group is attached")
machineName := clusterinfra.GetMachineNamesFromMachineSet(oc, machineSetName)[0]
instanceID, err := awsClient.GetAwsInstanceID(machineName)
o.Expect(err).NotTo(o.HaveOccurred())
securityGroups, err := awsClient.GetInstanceSecurityGroupIDs(instanceID)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(securityGroups).Should(o.ContainElement(sgID))
}) | |||||
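The JSON patch built by string concatenation above (bump replicas, append one security-group filter keyed by the Name tag) can also be marshalled from Go values. A minimal sketch with the same placeholder group name:

package main

import (
	"encoding/json"
	"fmt"
)

type sgFilter struct {
	Name   string   `json:"name"`
	Values []string `json:"values"`
}

type securityGroup struct {
	Filters []sgFilter `json:"filters"`
}

func main() {
	ops := []map[string]interface{}{
		{"op": "replace", "path": "/spec/replicas", "value": 1},
		{"op": "add", "path": "/spec/template/spec/providerSpec/value/securityGroups/-",
			"value": securityGroup{Filters: []sgFilter{{Name: "tag:Name", Values: []string{"ocp-66866-sg"}}}}},
	}
	out, _ := json.Marshal(ops)
	fmt.Println(string(out))
}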
test case | openshift/openshift-tests-private | 11a6e49f-e74e-43f3-962e-b53eb6e1fb37 | Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-33058-Implement defaulting machineset values for azure [Disruptive] | ['"path/filepath"', '"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-33058-Implement defaulting machineset values for azure [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure)
credType, err := oc.AsAdmin().Run("get").Args("cloudcredentials.operator.openshift.io/cluster", "-o=jsonpath={.spec.credentialsMode}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(credType, "Manual") {
g.Skip("Skip test on azure sts cluster")
}
mapiBaseDir := exutil.FixturePath("testdata", "clusterinfrastructure", "mapi")
defaultMachinesetAzureTemplate := filepath.Join(mapiBaseDir, "default-machineset-azure.yaml")
clusterID := clusterinfra.GetInfrastructureName(oc)
randomMachinesetName := clusterinfra.GetRandomMachineSetName(oc)
location, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.location}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
vnet, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.vnet}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
subnet, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.subnet}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
networkResourceGroup, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.networkResourceGroup}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
defaultMachinesetAzure := defaultMachinesetAzureDescription{
name: infrastructureName + "-33058-default",
clustername: clusterID,
template: defaultMachinesetAzureTemplate,
location: location,
vnet: vnet,
subnet: subnet,
namespace: machineAPINamespace,
networkResourceGroup: networkResourceGroup,
}
defer clusterinfra.WaitForMachinesDisapper(oc, defaultMachinesetAzure.name)
defaultMachinesetAzure.createDefaultMachineSetOnAzure(oc)
defer defaultMachinesetAzure.deleteDefaultMachineSetOnAzure(oc)
}) | |||||
test case | openshift/openshift-tests-private | 4dbe7a2f-6b0d-4242-9f60-2959d0bbc10e | Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-46966-Validation webhook check for gpus on GCP [Disruptive] | ['"strings"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-46966-Validation webhook check for gpus on GCP [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.GCP)
skipTestIfSpotWorkers(oc)
architecture.SkipArchitectures(oc, architecture.ARM64)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-46966"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
zone, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-o=jsonpath={.spec.template.spec.providerSpec.value.zone}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if !strings.Contains(zone, "us-central1-") {
g.Skip("not valid zone for GPU machines")
}
g.By("1.Update machineset with A100 GPUs (A family) and set gpus")
out, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"template":{"spec":{"providerSpec":{"value":{"machineType":"a2-highgpu-1g","gpus": [ { "count": 1,"type": "nvidia-tesla-p100" }]}}}}}}`, "--type=merge").Output()
o.Expect(strings.Contains(out, "A2 machine types have already attached gpus, additional gpus cannot be specified")).To(o.BeTrue())
g.By("2.Update machineset with nvidia-tesla-A100 Type")
out, _ = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"template":{"spec":{"providerSpec":{"value":{"machineType":"n1-standard-1","gpus": [ { "count": 1,"type": "nvidia-tesla-a100" }]}}}}}}`, "--type=merge").Output()
o.Expect(strings.Contains(out, "nvidia-tesla-a100 gpus, are only attached to the A2 machine types")).To(o.BeTrue())
g.By("3.Update machineset with other machine type families and set gpus")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"machineType":"e2-medium","gpus": [ { "count": 1,"type": "nvidia-tesla-p100" }]}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachineFailed(oc, machinesetName)
machineName := clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)[0]
out, _ = oc.AsAdmin().WithoutNamespace().Run("describe").Args(mapiMachine, machineName, "-n", machineAPINamespace).Output()
o.Expect(out).Should(o.ContainSubstring("e2-medium does not support accelerators. Only A2 and N1 machine type families support guest acceleartors"))
clusterinfra.ScaleMachineSet(oc, machinesetName, 0)
g.By("4.Update machineset with A100 GPUs (A2 family) nvidia-tesla-a100, onHostMaintenance is set to Migrate")
out, _ = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"template":{"spec":{"providerSpec":{"value":{"machineType":"a2-highgpu-1g","onHostMaintenance":"Migrate"}}}}}}`, "--type=merge").Output()
o.Expect(strings.Contains(out, "Forbidden: When GPUs are specified or using machineType with pre-attached GPUs(A2 machine family), onHostMaintenance must be set to Terminate")).To(o.BeTrue())
g.By("5.Update machineset with A100 GPUs (A2 family) nvidia-tesla-a100, restartPolicy with an invalid value")
out, _ = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"template":{"spec":{"providerSpec":{"value":{"machineType":"a2-highgpu-1g","restartPolicy": "Invalid","onHostMaintenance": "Terminate"}}}}}}`, "--type=merge").Output()
o.Expect(strings.Contains(out, "Invalid value: \"Invalid\": restartPolicy must be either Never or Always")).To(o.BeTrue())
g.By("6.Update machineset with A100 GPUs (A2 family) nvidia-tesla-a100, onHostMaintenance with an invalid value")
out, _ = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"template":{"spec":{"providerSpec":{"value":{"machineType":"a2-highgpu-1g","restartPolicy": "Always","onHostMaintenance": "Invalid"}}}}}}`, "--type=merge").Output()
o.Expect(strings.Contains(out, "Invalid value: \"Invalid\": onHostMaintenance must be either Migrate or Terminate")).To(o.BeTrue())
g.By("7.Update machineset with other GPU types, count with an invalid value")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"machineType":"n1-standard-1","restartPolicy": "Always","onHostMaintenance": "Terminate","gpus": [ { "count": -1,"type": "nvidia-tesla-p100" }]}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachineFailed(oc, machinesetName)
machineName = clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)[0]
out, _ = oc.AsAdmin().WithoutNamespace().Run("describe").Args(mapiMachine, machineName, "-n", machineAPINamespace).Output()
o.Expect(strings.Contains(out, "Number of accelerator cards attached to an instance must be one of [1, 2, 4]") || strings.Contains(out, "AcceleratorType nvidia-tesla-p100 not available in the zone")).To(o.BeTrue())
clusterinfra.ScaleMachineSet(oc, machinesetName, 0)
g.By("8.Update machineset with other GPU types, type with an empty value")
out, _ = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"template":{"spec":{"providerSpec":{"value":{"machineType":"n1-standard-1","restartPolicy": "Always","onHostMaintenance": "Terminate","gpus": [ { "count": 1,"type": "" }]}}}}}}`, "--type=merge").Output()
o.Expect(strings.Contains(out, "Required value: Type is required")).To(o.BeTrue())
g.By("9.Update machineset with other GPU types, type with an invalid value")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"machineType":"n1-standard-1","restartPolicy": "Always","onHostMaintenance": "Terminate","gpus": [ { "count": 1,"type": "invalid" }]}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachineFailed(oc, machinesetName)
machineName = clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)[0]
out, _ = oc.AsAdmin().WithoutNamespace().Run("describe").Args(mapiMachine, machineName, "-n", machineAPINamespace).Output()
o.Expect(out).Should(o.ContainSubstring("AcceleratorType invalid not available"))
}) | |||||
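The GPU-related errors asserted throughout the test above describe a few rules tying GPU type and count to the machine type family. An illustrative sketch of those rules, not the actual webhook or provider code:

package main

import (
	"errors"
	"fmt"
	"strings"
)

// validateGPUs reproduces the family constraints quoted in the test's expected errors.
func validateGPUs(machineType, gpuType string, gpuCount int) error {
	family := strings.SplitN(machineType, "-", 2)[0]
	switch {
	case family == "a2" && gpuCount > 0:
		return errors.New("A2 machine types have already attached gpus, additional gpus cannot be specified")
	case gpuType == "nvidia-tesla-a100" && family != "a2":
		return errors.New("nvidia-tesla-a100 gpus, are only attached to the A2 machine types")
	case gpuCount > 0 && family != "n1":
		return fmt.Errorf("%s does not support accelerators", machineType)
	}
	return nil
}

func main() {
	fmt.Println(validateGPUs("a2-highgpu-1g", "nvidia-tesla-p100", 1))
	fmt.Println(validateGPUs("n1-standard-1", "nvidia-tesla-a100", 1))
	fmt.Println(validateGPUs("e2-medium", "nvidia-tesla-p100", 1))
	fmt.Println(validateGPUs("n1-standard-1", "nvidia-tesla-p100", 1))
}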
test case | openshift/openshift-tests-private | 6635abbf-c3c2-4b69-a139-c51213dc2646 | Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-High-30379-New machine can join cluster when VPC has custom DHCP option set [Disruptive] | ['"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-High-30379-New machine can join cluster when VPC has custom DHCP option set [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
g.By("Create a new dhcpOptions")
var newDhcpOptionsID, currentDhcpOptionsID string
clusterinfra.GetAwsCredentialFromCluster(oc)
awsClient := exutil.InitAwsSession()
newDhcpOptionsID, err := awsClient.CreateDhcpOptionsWithDomainName("example30379.com")
if err != nil {
g.Skip("The credential is insufficient to perform create dhcpOptions operation, skip the cases!!")
}
defer func() {
err := awsClient.DeleteDhcpOptions(newDhcpOptionsID)
o.Expect(err).NotTo(o.HaveOccurred())
}()
g.By("Associate the VPC with the new dhcpOptionsId")
machineName := clusterinfra.ListMasterMachineNames(oc)[0]
instanceID, err := awsClient.GetAwsInstanceID(machineName)
o.Expect(err).NotTo(o.HaveOccurred())
vpcID, err := awsClient.GetAwsInstanceVPCId(instanceID)
o.Expect(err).NotTo(o.HaveOccurred())
currentDhcpOptionsID, err = awsClient.GetDhcpOptionsIDOfVpc(vpcID)
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
err := awsClient.AssociateDhcpOptions(vpcID, currentDhcpOptionsID)
o.Expect(err).NotTo(o.HaveOccurred())
}()
err = awsClient.AssociateDhcpOptions(vpcID, newDhcpOptionsID)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create a new machineset")
machinesetName := infrastructureName + "-30379"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 1}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
machineNameOfMachineSet := clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)[0]
nodeName := clusterinfra.GetNodeNameFromMachine(oc, machineNameOfMachineSet)
readyStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeName, "-o=jsonpath={.status.conditions[?(@.type==\"Ready\")].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(readyStatus).Should(o.Equal("True"))
internalDNS, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, machineNameOfMachineSet, "-o=jsonpath={.status.addresses[?(@.type==\"InternalDNS\")].address}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(internalDNS, "example30379.com")).To(o.BeTrue())
}) | |||||
test case | openshift/openshift-tests-private | 9697f6a1-fb37-4987-bc3e-46f9919a665e | Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-73762-New machine can join cluster when VPC has custom DHCP option set containing multiple domain names [Disruptive] | ['"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-73762-New machine can join cluster when VPC has custom DHCP option set containing multiple domain names [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
g.By("Create a new dhcpOptions")
var newDhcpOptionsID, currentDhcpOptionsID string
clusterinfra.GetAwsCredentialFromCluster(oc)
awsClient := exutil.InitAwsSession()
newDhcpOptionsID, err := awsClient.CreateDhcpOptionsWithDomainName("EXAMple73762A.com. example73762b.com. eXaMpLe73762C.COM")
if err != nil {
g.Skip("The credential is insufficient to perform create dhcpOptions operation, skip the cases!!")
}
defer func() {
err := awsClient.DeleteDhcpOptions(newDhcpOptionsID)
o.Expect(err).NotTo(o.HaveOccurred())
}()
g.By("Associate the VPC with the new dhcpOptionsId")
machineName := clusterinfra.ListMasterMachineNames(oc)[0]
instanceID, err := awsClient.GetAwsInstanceID(machineName)
o.Expect(err).NotTo(o.HaveOccurred())
vpcID, err := awsClient.GetAwsInstanceVPCId(instanceID)
o.Expect(err).NotTo(o.HaveOccurred())
currentDhcpOptionsID, err = awsClient.GetDhcpOptionsIDOfVpc(vpcID)
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
err := awsClient.AssociateDhcpOptions(vpcID, currentDhcpOptionsID)
o.Expect(err).NotTo(o.HaveOccurred())
}()
err = awsClient.AssociateDhcpOptions(vpcID, newDhcpOptionsID)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create a new machineset")
machinesetName := infrastructureName + "-73762"
	ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 1}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
machineNameOfMachineSet := clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)[0]
nodeName := clusterinfra.GetNodeNameFromMachine(oc, machineNameOfMachineSet)
readyStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeName, "-o=jsonpath={.status.conditions[?(@.type==\"Ready\")].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(readyStatus).Should(o.Equal("True"))
internalDNS, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, machineNameOfMachineSet, "-o=jsonpath={.status.addresses[?(@.type==\"InternalDNS\")].address}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(internalDNS, "EXAMple73762A.com") && strings.Contains(internalDNS, "example73762b.com") && strings.Contains(internalDNS, "eXaMpLe73762C.COM")).To(o.BeTrue())
}) | |||||
test case | openshift/openshift-tests-private | 37078207-f581-4e16-93a1-34236029151a | Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-High-73851-Node shouldn't have uninitialized taint [Disruptive] | ['"time"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-High-73851-Node shouldn't have uninitialized taint [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-73851"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Update machineset taint")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"taints":[{"key":"node.kubernetes.io/unreachable","effect":"NoExecute"},{"key":"anything","effect":"NoSchedule"},{"key":"node-role.kubernetes.io/infra","effect":"NoExecute"},{"key":"node.kubernetes.io/not-ready","effect":"NoExecute"}]}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check no uninitialized taint in node")
machineName := clusterinfra.GetLatestMachineFromMachineSet(oc, machinesetName)
o.Expect(machineName).NotTo(o.BeEmpty())
nodeName := clusterinfra.GetNodeNameFromMachine(oc, machineName)
o.Eventually(func() bool {
readyStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeName, "-o=jsonpath={.status.conditions[?(@.type==\"Ready\")].status}").Output()
		return err == nil && readyStatus == "True"
}).WithTimeout(5 * time.Minute).WithPolling(30 * time.Second).Should(o.BeTrue())
taints, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeName, "-o=jsonpath={.spec.taints}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(taints).ShouldNot(o.ContainSubstring("uninitialized"))
}) | |||||
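The Eventually block in the test above returns a plain boolean so Gomega keeps retrying until the node reports Ready; asserting inside the polled function would abort the test on the first non-Ready sample. A minimal sketch of the same wait expressed with wait.Poll, in the style of the helpers in metrics_utils.go and assuming this package's existing imports (time, wait, e2e, exutil), could look like this; waitForNodeReady is a hypothetical name and is not part of the suite.

// Hypothetical helper (not part of the suite): poll a node's Ready condition,
// mirroring the wait.Poll pattern used elsewhere in this package.
func waitForNodeReady(oc *exutil.CLI, nodeName string) {
	err := wait.Poll(30*time.Second, 5*time.Minute, func() (bool, error) {
		readyStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeName,
			"-o=jsonpath={.status.conditions[?(@.type==\"Ready\")].status}").Output()
		if err != nil {
			e2e.Logf("Error '%v' getting node %s, retry ...", err, nodeName)
			return false, nil
		}
		return readyStatus == "True", nil
	})
	exutil.AssertWaitPollNoErr(err, "node "+nodeName+" did not become Ready")
}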
test case | openshift/openshift-tests-private | d6287371-5754-49ec-b907-7d4d2cd990f4 | Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-73668-Create machineset with Reserved Capacity [Disruptive] | ['"context"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-73668-Create machineset with Reserved Capacity [Disruptive]", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure)
skipTestIfSpotWorkers(oc)
azureCloudName, azureErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.azure.cloudName}").Output()
o.Expect(azureErr).NotTo(o.HaveOccurred())
if azureCloudName == "AzureStackCloud" || azureCloudName == "AzureUSGovernmentCloud" {
g.Skip("Skip for ASH and azure Gov due to no zone for ash, and for USGov it's hard to getclient with baseURI!")
}
exutil.By("Create a machineset")
machinesetName := infrastructureName + "-73668"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
ms.CreateMachineSet(oc)
zone, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-o=jsonpath={.spec.template.spec.providerSpec.value.zone}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if zone == "" {
g.Skip("Zone doesn't exist, capacity reservation group cannot be set on a virtual machine which is part of an availability set!")
}
region, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-o=jsonpath={.spec.template.spec.providerSpec.value.location}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
machineType, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-o=jsonpath={.spec.template.spec.providerSpec.value.vmSize}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create capacityReservationGroup and capacityReservation")
resourceGroup, err := exutil.GetAzureCredentialFromCluster(oc)
o.Expect(err).NotTo(o.HaveOccurred())
capacityReservationGroupName := "capacityReservationGroup73668"
capacityReservationName := "capacityReservation73668"
azClientSet := exutil.NewAzureClientSetWithRootCreds(oc)
capacityReservationGroup, err := azClientSet.CreateCapacityReservationGroup(context.Background(), capacityReservationGroupName, resourceGroup, region, zone)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(capacityReservationGroup).NotTo(o.BeEmpty())
err = azClientSet.CreateCapacityReservation(context.Background(), capacityReservationGroupName, capacityReservationName, region, resourceGroup, machineType, zone)
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
ms.DeleteMachineSet(oc)
clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
azClientSet.DeleteCapacityReservation(context.Background(), capacityReservationGroupName, capacityReservationName, resourceGroup)
azClientSet.DeleteCapacityReservationGroup(context.Background(), capacityReservationGroupName, resourceGroup)
}()
exutil.By("Patch machineset with valid capacityReservationGroupID")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"capacityReservationGroupID": "`+capacityReservationGroup+`"}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
exutil.By("Check machine with capacityReservationGroupID")
capacityReservationGroupID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.capacityReservationGroupID}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(capacityReservationGroupID).Should(o.ContainSubstring("capacityReservationGroups"))
exutil.By("Patch machineset with empty capacityReservationGroupID and set replicas to 2")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":2,"template":{"spec":{"providerSpec":{"value":{ "capacityReservationGroupID": ""}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 2, machinesetName)
exutil.By("Check machine without capacityReservationGroupID")
machine := clusterinfra.GetLatestMachineFromMachineSet(oc, machinesetName)
capacityReservationGroupID, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, machine, "-n", "openshift-machine-api", "-o=jsonpath={.spec.providerSpec.value.capacityReservationGroupID}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(capacityReservationGroupID).To(o.BeEmpty())
}) | |||||
test case | openshift/openshift-tests-private | e4ef06b0-5a26-4793-9786-b71b505d195a | Author:zhsun-NonHyperShiftHOST-Medium-73669-Webhook validation for Reserved Capacity [Disruptive] | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:zhsun-NonHyperShiftHOST-Medium-73669-Webhook validation for Reserved Capacity [Disruptive]", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure)
skipTestIfSpotWorkers(oc)
azureCloudName, azureErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.azure.cloudName}").Output()
o.Expect(azureErr).NotTo(o.HaveOccurred())
if azureCloudName == "AzureStackCloud" || azureCloudName == "AzureUSGovernmentCloud" {
g.Skip("Skip for ASH and azure Gov due to no zone for ash, and for USGov it's hard to getclient with baseURI!")
}
exutil.By("Create a machineset ")
machinesetName := infrastructureName + "-73669"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
zone, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-o=jsonpath={.spec.template.spec.providerSpec.value.zone}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if zone == "" {
g.Skip("Zone doesn't exist, capacity reservation group cannot be set on a virtual machine which is part of an availability set!")
}
exutil.By("Patch machineset that the value of capacityReservationGroupID does not start with /")
out, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{ "capacityReservationGroupID": "subscriptions/53b8f551-f0fc-4bea-8cba-6d1fefd54c8a/resourceGroups/ZHSUN-AZ9-DVD88-RG/providers/Microsoft.Compute/capacityReservationGroups/zhsun-capacity"}}}}}}`, "--type=merge").Output()
o.Expect(out).To(o.ContainSubstring("must start with '/'"))
exutil.By("Patch machineset with invalid subscriptions")
out, _ = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{ "capacityReservationGroupID": "/subscrip/53b8f551-f0fc-4bea-8cba-6d1fefd54c8a/resourceGroups/ZHSUN-AZ9-DVD88-RG/providers/Microsoft.Compute/capacityReservationGroups/zhsun-capacity"}}}}}}`, "--type=merge").Output()
o.Expect(out).To(o.ContainSubstring("capacityReservationGroupID: Invalid value"))
exutil.By("Patch machineset with invalid resourceGroups")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{ "capacityReservationGroupID": "/subscriptions/53b8f551-f0fc-4bea-8cba-6d1fefd54c8a/resource/ZHSUN-AZ9-DVD88-RG/providers/Microsoft.Compute/capacityReservationGroups/zhsun-capacity"}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachineFailed(oc, machinesetName)
exutil.By("Patch machineset with invalid capacityReservationGroups")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":2,"template":{"spec":{"providerSpec":{"value":{ "capacityReservationGroupID": "/subscriptions/53b8f551-f0fc-4bea-8cba-6d1fefd54c8a/resourceGroups/ZHSUN-AZ9-DVD88-RG/providers/Microsoft.Compute/capacityReservation/zhsun-capacity"}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachineFailed(oc, machinesetName)
}) | |||||
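The webhook test above only asserts on fragments of the returned error strings for a malformed capacityReservationGroupID. For reference, a rough restatement of the ARM resource ID shape those messages imply is sketched below; this is illustrative only, inferred from the asserted messages, and is not the actual webhook implementation.

// Illustrative only: the general shape of the capacityReservationGroupID the webhook
// appears to expect, inferred from the error messages asserted above.
package clusterinfrastructure

import "regexp"

var capacityReservationGroupIDRe = regexp.MustCompile(
	`^/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.Compute/capacityReservationGroups/[^/]+$`)

func looksLikeCapacityReservationGroupID(id string) bool {
	return capacityReservationGroupIDRe.MatchString(id)
}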
test case | openshift/openshift-tests-private | 0b2af681-dd47-49cb-bd9e-58c6735dc8c5 | Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-74603-[MAPI] Support AWS Placement Group Partition Number [Disruptive] | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-74603-[MAPI] Support AWS Placement Group Partition Number [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
region, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.aws.region}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if region != "us-east-2" && region != "us-east-1" {
g.Skip("Not support region " + region + " for the case for now.")
}
clusterinfra.GetAwsCredentialFromCluster(oc)
awsClient := exutil.InitAwsSession()
_, err = awsClient.GetPlacementGroupByName("pgpartition3")
if err != nil {
g.Skip("There is no this placement group for testing, skip the cases!!")
}
g.By("Create a new machineset")
machinesetName := infrastructureName + "-74603"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
exutil.By("Patch machineset only with valid partition placementGroupName")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"placementGroupName":"pgpartition3"}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
exutil.By("Check machine with placementGroupName and without placementGroupPartition ")
placementGroupName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.placementGroupName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(placementGroupName).Should(o.Equal("pgpartition3"))
placementGroupPartition, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.placementGroupPartition}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(placementGroupPartition).To(o.BeEmpty())
exutil.By("Patch machineset with valid partition placementGroupName and placementGroupPartition")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":2,"template":{"spec":{"providerSpec":{"value":{"placementGroupName":"pgpartition3", "placementGroupPartition":2}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 2, machinesetName)
exutil.By("Check machine with placementGroupName and placementGroupPartition")
machine := clusterinfra.GetLatestMachineFromMachineSet(oc, machinesetName)
placementGroupName, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, machine, "-n", "openshift-machine-api", "-o=jsonpath={.spec.providerSpec.value.placementGroupName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(placementGroupName).Should(o.Equal("pgpartition3"))
placementGroupPartition, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, machine, "-n", "openshift-machine-api", "-o=jsonpath={.spec.providerSpec.value.placementGroupPartition}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(placementGroupPartition).Should(o.Equal("2"))
}) | |||||
test case | openshift/openshift-tests-private | d81465b0-e205-45cd-9b0d-4a9fce264989 | Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-75037-[MAPI] Webhook validation for AWS Placement Group Partition Number [Disruptive] | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-75037-[MAPI] Webhook validation for AWS Placement Group Partition Number [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
region, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.aws.region}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if region != "us-east-2" && region != "us-east-1" {
g.Skip("Not support region " + region + " for the case for now.")
}
clusterinfra.GetAwsCredentialFromCluster(oc)
awsClient := exutil.InitAwsSession()
_, err = awsClient.GetPlacementGroupByName("pgpartition3")
if err != nil {
g.Skip("There is no this placement group for testing, skip the cases!!")
}
g.By("Create a new machineset")
machinesetName := infrastructureName + "-75037"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
exutil.By("Update machineset with invalid Placement group partition nubmer")
out, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"placementGroupName":"pgpartition3", "placementGroupPartition":0}}}}}}`, "--type=merge").Output()
o.Expect(out).To(o.ContainSubstring("placementGroupPartition: Invalid value: 0: providerSpec.placementGroupPartition must be between 1 and 7"))
exutil.By("Update machineset with placementGroupPartition but without placementGroupName")
out, _ = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"placementGroupPartition":2}}}}}}`, "--type=merge").Output()
o.Expect(out).To(o.ContainSubstring("placementGroupPartition: Invalid value: 2: providerSpec.placementGroupPartition is set but providerSpec.placementGroupName is empty"))
exutil.By("Patch machineset with valid placementGroupPartition but cluster placementGroupName")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"placementGroupName":"pgcluster", "placementGroupPartition":2}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachineFailed(oc, machinesetName)
exutil.By("Patch machineset with invalid placementGroupPartition of the partition placementGroupName")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"placementGroupName":"pgpartition3", "placementGroupPartition":4}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachineFailed(oc, machinesetName)
}) | |||||
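The messages asserted above encode two rules: the partition number must be between 1 and 7, and placementGroupPartition may only be set together with placementGroupName. A hedged, illustrative restatement of those rules in plain Go (not the actual webhook code) is sketched here; a nil partition stands for "field not set in providerSpec".

// Illustrative only: the validation rules implied by the webhook error messages above.
package clusterinfrastructure

import "fmt"

func validatePlacementGroup(name string, partition *int) error {
	if partition == nil {
		// field not set, nothing to validate
		return nil
	}
	if *partition < 1 || *partition > 7 {
		return fmt.Errorf("providerSpec.placementGroupPartition must be between 1 and 7")
	}
	if name == "" {
		return fmt.Errorf("providerSpec.placementGroupPartition is set but providerSpec.placementGroupName is empty")
	}
	return nil
}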
test case | openshift/openshift-tests-private | 065f6c13-345f-4974-8a16-b28d5ecf2ba7 | Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-24721-Add support for machine tags [Disruptive] | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-24721-Add support for machine tags [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure)
exutil.By("Create a machineset")
machinesetName := infrastructureName + "-24721"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
exutil.By("Update machineset with tags")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"tags":{"key24721a":"value24721a","key24721b":"value24721b"}}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
exutil.By("Check machine with tags")
tags, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.tags}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("tags:%s", tags)
o.Expect(tags).Should(o.ContainSubstring("key24721b"))
}) | |||||
test case | openshift/openshift-tests-private | ca2ddc74-702a-4fdf-9b02-4881e9c4f073 | Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-52602-Drain operation should be asynchronous from the other machine operations [Disruptive] | ['"path/filepath"', '"strings"', '"time"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-52602-Drain operation should be asynchronous from the other machine operations [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.GCP, clusterinfra.Azure, clusterinfra.IBMCloud, clusterinfra.Nutanix, clusterinfra.VSphere, clusterinfra.OpenStack)
exutil.By("Create a new machineset")
machinesetName := infrastructureName + "-52602"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
exutil.By("Scale machineset to 5")
clusterinfra.ScaleMachineSet(oc, machinesetName, 5)
exutil.By("Create PDB")
miscDir := exutil.FixturePath("testdata", "clusterinfrastructure", "misc")
pdbTemplate := filepath.Join(miscDir, "pdb.yaml")
workloadTemplate := filepath.Join(miscDir, "workload-with-label.yaml")
pdb := PodDisruptionBudget{name: "pdb-52602", namespace: machineAPINamespace, template: pdbTemplate, label: "label-52602"}
workLoad := workLoadDescription{name: "workload-52602", namespace: machineAPINamespace, template: workloadTemplate, label: "label-52602"}
defer pdb.deletePDB(oc)
pdb.createPDB(oc)
defer workLoad.deleteWorkLoad(oc)
workLoad.createWorkLoad(oc)
exutil.By("Delete machines")
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args(mapiMachine, "-n", machineAPINamespace, "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "--wait=false").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check machines can quickly be created without waiting for the other Nodes to drain.")
o.Eventually(func() bool {
machineNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[*].metadata.name}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
machines := strings.Fields(machineNames)
if len(machines) == 10 {
return true
}
return false
}).WithTimeout(30 * time.Second).WithPolling(2 * time.Second).Should(o.BeTrue())
}) | |||||
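The drain test above relies on a PodDisruptionBudget plus a matching workload to keep node drains blocked, so deleted machines linger while their replacements are already being created. The pdb.yaml template itself is not shown in this file; a rough client-go equivalent, with the selector label key and minAvailable value as assumptions, could look like the following sketch.

// Rough client-go equivalent of the pdb.yaml template referenced above. The label key
// and minAvailable value are assumptions; the real template is not shown in this file.
package clusterinfrastructure

import (
	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func examplePDB() *policyv1.PodDisruptionBudget {
	minAvailable := intstr.FromInt(1)
	return &policyv1.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{Name: "pdb-52602", Namespace: "openshift-machine-api"},
		Spec: policyv1.PodDisruptionBudgetSpec{
			// Eviction is denied whenever it would drop ready matching pods below minAvailable,
			// which is what keeps the node drains hanging in this test.
			MinAvailable: &minAvailable,
			Selector:     &metav1.LabelSelector{MatchLabels: map[string]string{"app": "label-52602"}},
		},
	}
}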
test case | openshift/openshift-tests-private | b9fbd4e9-ca83-4933-bd13-488da41c9e42 | Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-76367-[MAPI] Allow creating Nutanix worker VMs with GPUs [Disruptive] | ['"fmt"', '"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-76367-[MAPI] Allow creating Nutanix worker VMs with GPUs [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Nutanix)
// skip zones other than Development-GPU
zones, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-o=jsonpath={.items[*].metadata.labels.machine\\.openshift\\.io\\/zone}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if !strings.Contains(zones, "Development-GPU") {
g.Skip(fmt.Sprintf("this case can be only run in Development-GPU zone, but is's %s", zones))
}
g.By("Create a new machineset")
machinesetName := infrastructureName + "-76367"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Update machineset with gpus")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"gpus":[{"type":"Name","name":"Tesla T4 compute"}]}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check machine with gpus")
gpus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.gpus}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("gpus:%s", gpus)
o.Expect(strings.Contains(gpus, "Tesla T4 compute")).To(o.BeTrue())
}) | |||||
test case | openshift/openshift-tests-private | eb703b08-4e89-4ded-891f-6e32536063b8 | Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-76366-[MAPI] Allow creating Nutanix VMs with multiple disks [Disruptive] | ['"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/machines.go | g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-76366-[MAPI] Allow creating Nutanix VMs with multiple disks [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Nutanix)
g.By("Create a new machineset")
machinesetName := infrastructureName + "-76366"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Update machineset with data disks")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"dataDisks":[{"deviceProperties":{"deviceType":"Disk","adapterType":"SCSI","deviceIndex":1},"diskSize":"1Gi","storageConfig":{"diskMode":"Standard"}}]}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check machine with data disks")
dataDisks, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.dataDisks}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("dataDisks:%s", dataDisks)
o.Expect(strings.Contains(dataDisks, "SCSI")).To(o.BeTrue())
}) | |||||
test | openshift/openshift-tests-private | 5ebf035e-9955-4a8b-9876-ab36b0f8da9e | metrics | import (
"fmt"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/metrics.go | package clusterinfrastructure
import (
"fmt"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-cluster-lifecycle] Cluster_Infrastructure MAPI", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("metrics", exutil.KubeConfigPath())
)
g.BeforeEach(func() {
exutil.SkipForSNOCluster(oc)
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Medium-45499-mapi_current_pending_csr should reflect real pending CSR count", func() {
g.By("Check the MAPI pending csr count, metric only fires if there are MAPI specific CSRs pending")
csrsName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("csr", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
pending := 0
for _, csrName := range strings.Split(csrsName, " ") {
csr, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("csr", csrName).Output()
if strings.Contains(csr, "Pending") && (strings.Contains(csr, "system:serviceaccount:openshift-machine-config-operator:node-bootstrapper") || strings.Contains(csr, "system:node:")) {
pending++
}
}
g.By("Get machine-approver-controller pod name")
machineApproverPodName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[0].metadata.name}", "-n", machineApproverNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Check the value of mapi_current_pending_csr")
token := getPrometheusSAToken(oc)
metrics, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args(machineApproverPodName, "-c", "machine-approver-controller", "-n", machineApproverNamespace, "-i", "--", "curl", "-k", "-H", fmt.Sprintf("Authorization: Bearer %v", token), "https://localhost:9192/metrics").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(metrics).NotTo(o.BeEmpty())
checkMetricsShown(oc, "mapi_current_pending_csr", strconv.Itoa(pending))
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-43764-MachineHealthCheckUnterminatedShortCircuit alert should be fired when a MHC has been in a short circuit state [Serial][Slow][Disruptive]", func() {
g.By("Create a new machineset")
clusterinfra.SkipConditionally(oc)
infrastructureName := clusterinfra.GetInfrastructureName(oc)
machinesetName := infrastructureName + "-43764"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 1}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Create a MachineHealthCheck")
clusterID := clusterinfra.GetInfrastructureName(oc)
msMachineRole, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, machinesetName, "-o=jsonpath={.spec.template.metadata.labels.machine\\.openshift\\.io\\/cluster-api-machine-type}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
mhcBaseDir := exutil.FixturePath("testdata", "clusterinfrastructure", "mhc")
mhcTemplate := filepath.Join(mhcBaseDir, "mhc.yaml")
mhc := mhcDescription{
clusterid: clusterID,
maxunhealthy: "0%",
machinesetName: machinesetName,
machineRole: msMachineRole,
name: "mhc-43764",
template: mhcTemplate,
namespace: "openshift-machine-api",
}
defer mhc.deleteMhc(oc)
mhc.createMhc(oc)
g.By("Delete the node attached to the machine")
machineName := clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)[0]
nodeName := clusterinfra.GetNodeNameFromMachine(oc, machineName)
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("node", nodeName).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Get machine-api-controller pod name")
machineAPIControllerPodName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[0].metadata.name}", "-l", "api=clusterapi", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Check metrics mapi_machinehealthcheck_short_circuit")
token := getPrometheusSAToken(oc)
metrics, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args(machineAPIControllerPodName, "-c", "machine-healthcheck-controller", "-n", machineAPINamespace, "-i", "--", "curl", "-k", "-H", fmt.Sprintf("Authorization: Bearer %v", token), "https://localhost:8444/metrics").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(metrics).NotTo(o.BeEmpty())
o.Expect(metrics).To(o.ContainSubstring("mapi_machinehealthcheck_short_circuit{name=\"" + mhc.name + "\",namespace=\"openshift-machine-api\"} " + strconv.Itoa(1)))
g.By("Check alert MachineHealthCheckUnterminatedShortCircuit is raised")
checkAlertRaised(oc, "MachineHealthCheckUnterminatedShortCircuit")
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-NonPreRelease-Longduration-High-36989-mapi_instance_create_failed metrics should work [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
var patchstr string
platform := clusterinfra.CheckPlatform(oc)
switch platform {
case clusterinfra.AWS, clusterinfra.AlibabaCloud:
patchstr = `{"spec":{"replicas":5,"template":{"spec":{"providerSpec":{"value":{"instanceType":"invalid"}}}}}}`
case clusterinfra.GCP:
patchstr = `{"spec":{"replicas":5,"template":{"spec":{"providerSpec":{"value":{"machineType":"invalid"}}}}}}`
case clusterinfra.Azure:
patchstr = `{"spec":{"replicas":5,"template":{"spec":{"providerSpec":{"value":{"vmSize":"invalid"}}}}}}`
/*
there is a bug(https://bugzilla.redhat.com/show_bug.cgi?id=1900538) for openstack
case clusterinfra.OpenStack:
patchstr = `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"flavor":"invalid"}}}}}}`
*/
case clusterinfra.VSphere:
patchstr = `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"template":"invalid"}}}}}}`
default:
e2e.Logf("Not support cloud provider for the case for now.")
g.Skip("Not support cloud provider for the case for now.")
}
g.By("Create a new machineset")
infrastructureName := clusterinfra.GetInfrastructureName(oc)
machinesetName := infrastructureName + "-36989"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Update machineset with invalid instanceType(or other similar field)")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", patchstr, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachineFailed(oc, machinesetName)
machineName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-o=jsonpath={.items[0].metadata.name}", "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName).Output()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Check metrics mapi_instance_create_failed is shown")
checkMetricsShown(oc, "mapi_instance_create_failed", machineName)
g.By("Investigate cluster with excessive number of samples for the machine-api-controllers job - case-OCP63167")
metricsName := "mapi_instance_create_failed"
timestampRegex := regexp.MustCompile(`\b(?:[0-1]?[0-9]|2[0-3]):[0-5]?[0-9]:[0-5]?[0-9]\b`)
token := getPrometheusSAToken(oc)
url, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("route", "prometheus-k8s", "-n", "openshift-monitoring", "-o=jsonpath={.spec.host}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
metricsCMD := fmt.Sprintf("oc -n openshift-monitoring exec -c prometheus prometheus-k8s-0 -- curl -X GET --header \"Authorization: Bearer %s\" https://%s/api/v1/query?query=%s --insecure", token, url, metricsName)
metricsOutput, cmdErr := exec.Command("bash", "-c", metricsCMD).Output()
o.Expect(cmdErr).NotTo(o.HaveOccurred())
o.Expect(timestampRegex.MatchString(string(metricsOutput))).NotTo(o.BeTrue())
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-High-25615-Medium-37264-Machine metrics should be collected [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP, clusterinfra.VSphere, clusterinfra.IBMCloud, clusterinfra.AlibabaCloud, clusterinfra.Nutanix, clusterinfra.OpenStack)
g.By("Create a new machineset")
infrastructureName := clusterinfra.GetInfrastructureName(oc)
machinesetName := infrastructureName + "-25615-37264"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 1}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Check metrics mapi_machine_created_timestamp_seconds is shown")
checkMetricsShown(oc, "mapi_machine_created_timestamp_seconds")
g.By("Check metrics mapi_machine_phase_transition_seconds_sum is shown")
checkMetricsShown(oc, "mapi_machine_phase_transition_seconds_sum")
})
})
| package clusterinfrastructure | ||||
test case | openshift/openshift-tests-private | 59d06871-d582-420a-83bd-635d1960f17e | Author:zhsun-NonHyperShiftHOST-Medium-45499-mapi_current_pending_csr should reflect real pending CSR count | ['"fmt"', '"os/exec"', '"strconv"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/metrics.go | g.It("Author:zhsun-NonHyperShiftHOST-Medium-45499-mapi_current_pending_csr should reflect real pending CSR count", func() {
g.By("Check the MAPI pending csr count, metric only fires if there are MAPI specific CSRs pending")
csrsName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("csr", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
pending := 0
for _, csrName := range strings.Split(csrsName, " ") {
csr, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("csr", csrName).Output()
if strings.Contains(csr, "Pending") && (strings.Contains(csr, "system:serviceaccount:openshift-machine-config-operator:node-bootstrapper") || strings.Contains(csr, "system:node:")) {
pending++
}
}
g.By("Get machine-approver-controller pod name")
machineApproverPodName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[0].metadata.name}", "-n", machineApproverNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Check the value of mapi_current_pending_csr")
token := getPrometheusSAToken(oc)
metrics, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args(machineApproverPodName, "-c", "machine-approver-controller", "-n", machineApproverNamespace, "-i", "--", "curl", "-k", "-H", fmt.Sprintf("Authorization: Bearer %v", token), "https://localhost:9192/metrics").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(metrics).NotTo(o.BeEmpty())
checkMetricsShown(oc, "mapi_current_pending_csr", strconv.Itoa(pending))
}) | |||||
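The test above counts pending MAPI CSRs by text-matching the `oc get csr <name>` output. A hypothetical, more structured version of the same predicate using client-go (a CSR is pending while it has no Approved/Denied condition, and the requestor is either the node-bootstrapper service account or a node identity) might look like this sketch.

// Hypothetical sketch, not part of this suite: count pending MAPI-related CSRs via client-go.
package clusterinfrastructure

import (
	"context"
	"strings"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func countPendingMAPICSRs(ctx context.Context, client kubernetes.Interface) (int, error) {
	csrs, err := client.CertificatesV1().CertificateSigningRequests().List(ctx, metav1.ListOptions{})
	if err != nil {
		return 0, err
	}
	pending := 0
	for _, csr := range csrs.Items {
		if len(csr.Status.Conditions) != 0 {
			// already approved or denied, not pending
			continue
		}
		if csr.Spec.Username == "system:serviceaccount:openshift-machine-config-operator:node-bootstrapper" ||
			strings.HasPrefix(csr.Spec.Username, "system:node:") {
			pending++
		}
	}
	return pending, nil
}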
test case | openshift/openshift-tests-private | c688b35a-a2a0-46a4-a4df-510d91696075 | Author:zhsun-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-43764-MachineHealthCheckUnterminatedShortCircuit alert should be fired when a MHC has been in a short circuit state [Serial][Slow][Disruptive] | ['"fmt"', '"os/exec"', '"path/filepath"', '"strconv"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/metrics.go | g.It("Author:zhsun-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-43764-MachineHealthCheckUnterminatedShortCircuit alert should be fired when a MHC has been in a short circuit state [Serial][Slow][Disruptive]", func() {
g.By("Create a new machineset")
clusterinfra.SkipConditionally(oc)
infrastructureName := clusterinfra.GetInfrastructureName(oc)
machinesetName := infrastructureName + "-43764"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 1}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Create a MachineHealthCheck")
clusterID := clusterinfra.GetInfrastructureName(oc)
msMachineRole, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, machinesetName, "-o=jsonpath={.spec.template.metadata.labels.machine\\.openshift\\.io\\/cluster-api-machine-type}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
mhcBaseDir := exutil.FixturePath("testdata", "clusterinfrastructure", "mhc")
mhcTemplate := filepath.Join(mhcBaseDir, "mhc.yaml")
mhc := mhcDescription{
clusterid: clusterID,
maxunhealthy: "0%",
machinesetName: machinesetName,
machineRole: msMachineRole,
name: "mhc-43764",
template: mhcTemplate,
namespace: "openshift-machine-api",
}
defer mhc.deleteMhc(oc)
mhc.createMhc(oc)
g.By("Delete the node attached to the machine")
machineName := clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)[0]
nodeName := clusterinfra.GetNodeNameFromMachine(oc, machineName)
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("node", nodeName).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Get machine-api-controller pod name")
machineAPIControllerPodName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[0].metadata.name}", "-l", "api=clusterapi", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Check metrics mapi_machinehealthcheck_short_circuit")
token := getPrometheusSAToken(oc)
metrics, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args(machineAPIControllerPodName, "-c", "machine-healthcheck-controller", "-n", machineAPINamespace, "-i", "--", "curl", "-k", "-H", fmt.Sprintf("Authorization: Bearer %v", token), "https://localhost:8444/metrics").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(metrics).NotTo(o.BeEmpty())
o.Expect(metrics).To(o.ContainSubstring("mapi_machinehealthcheck_short_circuit{name=\"" + mhc.name + "\",namespace=\"openshift-machine-api\"} " + strconv.Itoa(1)))
g.By("Check alert MachineHealthCheckUnterminatedShortCircuit is raised")
checkAlertRaised(oc, "MachineHealthCheckUnterminatedShortCircuit")
}) | |||||
test case | openshift/openshift-tests-private | 65ee0183-9595-488d-a1ae-1ac074f895f3 | Author:huliu-NonHyperShiftHOST-NonPreRelease-Longduration-High-36989-mapi_instance_create_failed metrics should work [Disruptive] | ['"fmt"', '"os/exec"', '"regexp"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/metrics.go | g.It("Author:huliu-NonHyperShiftHOST-NonPreRelease-Longduration-High-36989-mapi_instance_create_failed metrics should work [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
var patchstr string
platform := clusterinfra.CheckPlatform(oc)
switch platform {
case clusterinfra.AWS, clusterinfra.AlibabaCloud:
patchstr = `{"spec":{"replicas":5,"template":{"spec":{"providerSpec":{"value":{"instanceType":"invalid"}}}}}}`
case clusterinfra.GCP:
patchstr = `{"spec":{"replicas":5,"template":{"spec":{"providerSpec":{"value":{"machineType":"invalid"}}}}}}`
case clusterinfra.Azure:
patchstr = `{"spec":{"replicas":5,"template":{"spec":{"providerSpec":{"value":{"vmSize":"invalid"}}}}}}`
/*
there is a bug(https://bugzilla.redhat.com/show_bug.cgi?id=1900538) for openstack
case clusterinfra.OpenStack:
patchstr = `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"flavor":"invalid"}}}}}}`
*/
case clusterinfra.VSphere:
patchstr = `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"template":"invalid"}}}}}}`
default:
e2e.Logf("Not support cloud provider for the case for now.")
g.Skip("Not support cloud provider for the case for now.")
}
g.By("Create a new machineset")
infrastructureName := clusterinfra.GetInfrastructureName(oc)
machinesetName := infrastructureName + "-36989"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Update machineset with invalid instanceType(or other similar field)")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", patchstr, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachineFailed(oc, machinesetName)
machineName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-o=jsonpath={.items[0].metadata.name}", "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName).Output()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Check metrics mapi_instance_create_failed is shown")
checkMetricsShown(oc, "mapi_instance_create_failed", machineName)
g.By("Investigate cluster with excessive number of samples for the machine-api-controllers job - case-OCP63167")
metricsName := "mapi_instance_create_failed"
timestampRegex := regexp.MustCompile(`\b(?:[0-1]?[0-9]|2[0-3]):[0-5]?[0-9]:[0-5]?[0-9]\b`)
token := getPrometheusSAToken(oc)
url, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("route", "prometheus-k8s", "-n", "openshift-monitoring", "-o=jsonpath={.spec.host}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
metricsCMD := fmt.Sprintf("oc -n openshift-monitoring exec -c prometheus prometheus-k8s-0 -- curl -X GET --header \"Authorization: Bearer %s\" https://%s/api/v1/query?query=%s --insecure", token, url, metricsName)
metricsOutput, cmdErr := exec.Command("bash", "-c", metricsCMD).Output()
o.Expect(cmdErr).NotTo(o.HaveOccurred())
o.Expect(timestampRegex.MatchString(string(metricsOutput))).NotTo(o.BeTrue())
}) | |||||
test case | openshift/openshift-tests-private | 264c31fa-e3fc-4f4a-b988-6140b3f14c39 | Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-High-25615-Medium-37264-Machine metrics should be collected [Disruptive] | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/metrics.go | g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-High-25615-Medium-37264-Machine metrics should be collected [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP, clusterinfra.VSphere, clusterinfra.IBMCloud, clusterinfra.AlibabaCloud, clusterinfra.Nutanix, clusterinfra.OpenStack)
g.By("Create a new machineset")
infrastructureName := clusterinfra.GetInfrastructureName(oc)
machinesetName := infrastructureName + "-25615-37264"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 1}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Check metrics mapi_machine_created_timestamp_seconds is shown")
checkMetricsShown(oc, "mapi_machine_created_timestamp_seconds")
g.By("Check metrics mapi_machine_phase_transition_seconds_sum is shown")
checkMetricsShown(oc, "mapi_machine_phase_transition_seconds_sum")
}) | |||||
file | openshift/openshift-tests-private | c0a2ad4b-b102-4b49-b41f-e974fb19e1ee | metrics_utils | import (
"fmt"
"os/exec"
"strings"
"time"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/metrics_utils.go | package clusterinfrastructure
import (
"fmt"
"os/exec"
"strings"
"time"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
// get a token assigned to prometheus-k8s from openshift-monitoring namespace
func getPrometheusSAToken(oc *exutil.CLI) string {
e2e.Logf("Getting a token assgined to prometheus-k8s from openshift-monitoring namespace...")
token, err := exutil.GetSAToken(oc)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(token).NotTo(o.BeEmpty())
return token
}
// check if an alert is raised (pending or firing)
func checkAlertRaised(oc *exutil.CLI, alertName string) {
token := getPrometheusSAToken(oc)
url, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("route", "prometheus-k8s", "-n", "openshift-monitoring", "-o=jsonpath={.spec.host}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
alertCMD := fmt.Sprintf("oc -n openshift-monitoring exec -c prometheus prometheus-k8s-0 -- curl -s -k -H \"Authorization: Bearer %s\" https://%s/api/v1/alerts | jq -r '.data.alerts[] | select (.labels.alertname == \"%s\")'", token, url, alertName)
err = wait.Poll(30*time.Second, 960*time.Second, func() (bool, error) {
result, err := exec.Command("bash", "-c", alertCMD).Output()
if err != nil {
e2e.Logf("Error '%v' retrieving prometheus alert, retry ...", err)
return false, nil
}
if len(string(result)) == 0 {
e2e.Logf("Prometheus alert is nil, retry ...")
return false, nil
}
if !strings.Contains(string(result), "firing") && !strings.Contains(string(result), "pending") {
e2e.Logf(string(result))
return false, fmt.Errorf("alert state is not firing or pending")
}
e2e.Logf("Alert %s found with the status firing or pending", alertName)
return true, nil
})
exutil.AssertWaitPollNoErr(err, "alert state is not firing or pending")
}
// check if metrics are shown
func checkMetricsShown(oc *exutil.CLI, metricsName string, args ...string) {
e2e.Logf("Check metrics " + metricsName)
token := getPrometheusSAToken(oc)
url, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("route", "prometheus-k8s", "-n", "openshift-monitoring", "-o=jsonpath={.spec.host}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
metricsCMD := fmt.Sprintf("oc -n openshift-monitoring exec -c prometheus prometheus-k8s-0 -- curl -s -k -H \"Authorization: Bearer %s\" https://%s/api/v1/query?query=%s", token, url, metricsName)
var queryResult string
errQuery := wait.Poll(10*time.Second, 300*time.Second, func() (bool, error) {
result, err := exec.Command("bash", "-c", metricsCMD).Output()
if err != nil {
e2e.Logf("Error '%v' retrieving metrics, retry ...", err)
return false, nil
}
queryResult = string(result)
if !strings.Contains(queryResult, metricsName) {
e2e.Logf("Metrics not contain '%s', retry ...", metricsName)
return false, nil
}
for _, arg := range args {
if !strings.Contains(queryResult, arg) {
e2e.Logf("Metrics not contain '%s', retry ...", arg)
return false, nil
}
}
e2e.Logf(metricsName + " metrics is shown right")
return true, nil
})
if errQuery != nil {
e2e.Logf("the failing query result is %s", queryResult)
}
exutil.AssertWaitPollNoErr(errQuery, "Check metrics "+metricsName+" failed")
}
| package clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 8cde6cea-0793-4d5d-8709-12cc77518e6c | getPrometheusSAToken | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/metrics_utils.go | func getPrometheusSAToken(oc *exutil.CLI) string {
e2e.Logf("Getting a token assgined to prometheus-k8s from openshift-monitoring namespace...")
token, err := exutil.GetSAToken(oc)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(token).NotTo(o.BeEmpty())
return token
} | clusterinfrastructure | |||||
function | openshift/openshift-tests-private | 96513a87-961c-49d6-bd03-7c5ce680737a | checkAlertRaised | ['"fmt"', '"os/exec"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/metrics_utils.go | func checkAlertRaised(oc *exutil.CLI, alertName string) {
token := getPrometheusSAToken(oc)
url, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("route", "prometheus-k8s", "-n", "openshift-monitoring", "-o=jsonpath={.spec.host}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
alertCMD := fmt.Sprintf("oc -n openshift-monitoring exec -c prometheus prometheus-k8s-0 -- curl -s -k -H \"Authorization: Bearer %s\" https://%s/api/v1/alerts | jq -r '.data.alerts[] | select (.labels.alertname == \"%s\")'", token, url, alertName)
err = wait.Poll(30*time.Second, 960*time.Second, func() (bool, error) {
result, err := exec.Command("bash", "-c", alertCMD).Output()
if err != nil {
e2e.Logf("Error '%v' retrieving prometheus alert, retry ...", err)
return false, nil
}
if len(string(result)) == 0 {
e2e.Logf("Prometheus alert is nil, retry ...")
return false, nil
}
if !strings.Contains(string(result), "firing") && !strings.Contains(string(result), "pending") {
e2e.Logf(string(result))
return false, fmt.Errorf("alert state is not firing or pending")
}
e2e.Logf("Alert %s found with the status firing or pending", alertName)
return true, nil
})
exutil.AssertWaitPollNoErr(err, "alert state is not firing or pending")
} | clusterinfrastructure | ||||
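checkAlertRaised execs curl inside the prometheus-k8s-0 pod, so the query originates in-cluster. For comparison, a sketch of calling the same /api/v1/alerts endpoint directly with net/http is below; it assumes the prometheus-k8s route host is reachable from wherever the tests run, which is exactly the assumption the in-pod curl avoids.

// Sketch only: hit the Prometheus alerts API with net/http instead of exec'ing curl in the
// prometheus pod. Assumes the prometheus-k8s route host is reachable from the test process.
package clusterinfrastructure

import (
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
)

func queryAlerts(host, token string) (string, error) {
	client := &http.Client{Transport: &http.Transport{
		// mirrors the -k flag passed to curl in the helper above
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}}
	req, err := http.NewRequest("GET", fmt.Sprintf("https://%s/api/v1/alerts", host), nil)
	if err != nil {
		return "", err
	}
	req.Header.Set("Authorization", "Bearer "+token)
	resp, err := client.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	return string(body), err
}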
function | openshift/openshift-tests-private | e4358d37-9126-484e-87ca-25c23afa2e61 | checkMetricsShown | ['"fmt"', '"os/exec"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/metrics_utils.go | func checkMetricsShown(oc *exutil.CLI, metricsName string, args ...string) {
e2e.Logf("Check metrics " + metricsName)
token := getPrometheusSAToken(oc)
url, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("route", "prometheus-k8s", "-n", "openshift-monitoring", "-o=jsonpath={.spec.host}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
metricsCMD := fmt.Sprintf("oc -n openshift-monitoring exec -c prometheus prometheus-k8s-0 -- curl -s -k -H \"Authorization: Bearer %s\" https://%s/api/v1/query?query=%s", token, url, metricsName)
var queryResult string
errQuery := wait.Poll(10*time.Second, 300*time.Second, func() (bool, error) {
result, err := exec.Command("bash", "-c", metricsCMD).Output()
if err != nil {
e2e.Logf("Error '%v' retrieving metrics, retry ...", err)
return false, nil
}
queryResult = string(result)
if !strings.Contains(queryResult, metricsName) {
e2e.Logf("Metrics not contain '%s', retry ...", metricsName)
return false, nil
}
for _, arg := range args {
if !strings.Contains(queryResult, arg) {
e2e.Logf("Metrics not contain '%s', retry ...", arg)
return false, nil
}
}
e2e.Logf(metricsName + " metrics is shown right")
return true, nil
})
if errQuery != nil {
e2e.Logf("the failing query result is %s", queryResult)
}
exutil.AssertWaitPollNoErr(errQuery, "Check metrics "+metricsName+" failed")
} | clusterinfrastructure | ||||
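Typical usage of checkMetricsShown, taken from the tests earlier in this package: the metric name is mandatory, and any extra arguments are additional substrings the query result must contain, such as a machine name or an expected count.

// Examples from the tests above: assert the metric exists, optionally with specific values.
checkMetricsShown(oc, "mapi_current_pending_csr", strconv.Itoa(pending))
checkMetricsShown(oc, "mapi_instance_create_failed", machineName)
checkMetricsShown(oc, "mapi_machine_created_timestamp_seconds")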
test | openshift/openshift-tests-private | c9e287b5-4402-4a03-b096-efd4b71b3158 | misc_cases | import (
"context"
"path/filepath"
"regexp"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/misc_cases.go | package clusterinfrastructure
import (
"context"
"path/filepath"
"regexp"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-cluster-lifecycle] Cluster_Infrastructure MAPI", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("machine-proxy-cluster", exutil.KubeConfigPath())
)
g.BeforeEach(func() {
exutil.SkipForSNOCluster(oc)
})
// author: [email protected]
g.It("Author:miyadav-NonHyperShiftHOST-High-37384-Machine API components should honour cluster wide proxy settings", func() {
g.By("Check if it's a proxy cluster")
httpProxy, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("proxy/cluster", "-o=jsonpath={.spec.httpProxy}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
httpsProxy, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("proxy/cluster", "-o=jsonpath={.spec.httpsProxy}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if len(httpProxy) == 0 && len(httpsProxy) == 0 {
g.Skip("Skip for non-proxy cluster!")
}
g.By("Check if machine-controller-pod is using cluster proxy")
machineControllerPod, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", "openshift-machine-api", "-l", "k8s-app=controller", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if len(machineControllerPod) == 0 {
g.Skip("Skip for no machine-api-controller pod in cluster")
} else {
envMapi, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", machineControllerPod, "-n", "openshift-machine-api", "-o=jsonpath={.spec.containers[0].env[0].name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if len(envMapi) == 0 {
e2e.Failf("jsonpath needs to be reviewed")
} else if strings.Compare(envMapi, "HTTP_PROXY") != 0 {
g.By("machine-api does not uses cluster proxy")
e2e.Failf("For more details refer - BZ 1896704")
}
}
})
// author: [email protected]
g.It("Author:huliu-Low-34718-Node labels and Affinity definition in PV should match", func() {
miscBaseDir := exutil.FixturePath("testdata", "clusterinfrastructure", "misc")
pvcTemplate := filepath.Join(miscBaseDir, "pvc34718.yaml")
podTemplate := filepath.Join(miscBaseDir, "pod34718.yaml")
pvc := pvcDescription{
storageSize: "1Gi",
template: pvcTemplate,
}
podName := "task-pv-pod"
pod := exutil.Pod{Name: podName, Namespace: "openshift-machine-api", Template: podTemplate, Parameters: []string{}}
storageclassExists, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("sc", "-o", "jsonpath={.items}").Output()
//If no storage class then items string is returned as []
if len(storageclassExists) < 3 {
g.Skip("Storage class not available by default")
}
g.By("Create pvc")
defer pvc.deletePvc(oc)
pvc.createPvc(oc)
g.By("Create pod")
defer pod.Delete(oc)
pod.Create(oc)
nodeName, _ := exutil.GetPodNodeName(oc, "openshift-machine-api", podName)
getNodeLabels, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeName, "-o=jsonpath={.metadata.labels}", "-n", "openshift-machine-api").Output()
describePv, _ := oc.AsAdmin().WithoutNamespace().Run("describe").Args("pv", "-n", "openshift-machine-api").Output()
if strings.Contains(getNodeLabels, `region":"`) && strings.Contains(describePv, "region in ") {
g.By("Check region info")
compileRegex := regexp.MustCompile(`region":"(.*?)"`)
matchArr := compileRegex.FindStringSubmatch(getNodeLabels)
region := matchArr[len(matchArr)-1]
if !strings.Contains(describePv, "region in ["+region+"]") {
e2e.Failf("Cannot get log region in [" + region + "]")
}
}
})
// author: [email protected]
g.It("Author:miyadav-NonHyperShiftHOST-High-60147-[clusterInfra] check machineapi and clusterautoscaler as optional operator", func() {
g.By("Check capability shows operator is optional")
capability, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion", "version", "-o=jsonpath={.status.capabilities.enabledCapabilities}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
//This condition is for clusters installed with baseline capabilities set to NONE
if strings.Contains(capability, "MachineAPI") {
g.By("Check cluster-autoscaler has annotation to confirm optional status")
annotation, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co", "cluster-autoscaler", "-o=jsonpath={.metadata.annotations}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(annotation).To(o.ContainSubstring("\"capability.openshift.io/name\":\"MachineAPI\""))
g.By("Check control-plane-machine-set has annotation to confirm optional status")
annotation, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("co", "control-plane-machine-set", "-o=jsonpath={.metadata.annotations}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(annotation).To(o.ContainSubstring("\"capability.openshift.io/name\":\"MachineAPI\""))
g.By("Check machine-api has annotation to confirm optional status")
annotation, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("co", "machine-api", "-o=jsonpath={.metadata.annotations}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(annotation).To(o.ContainSubstring("\"capability.openshift.io/name\":\"MachineAPI\""))
} else {
g.Skip("MachineAPI not enabled so co machine-api/cluster-autoscaler wont be present to check annotations")
}
})
// author: [email protected]
g.It("Author:miyadav-NonHyperShiftHOST-High-54053-Implement tag categories cache for MAPI vsphere provider [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.VSphere)
g.By("Create a new machineset")
infrastructureName := clusterinfra.GetInfrastructureName(oc)
machinesetName := infrastructureName + "-54053"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Scale up machineset")
clusterinfra.ScaleMachineSet(oc, machinesetName, 1)
machineControllerPodName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-machine-api", "-l", "api=clusterapi,k8s-app=controller", "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
machineControllerLog, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("pod/"+machineControllerPodName, "-c", "machine-controller", "-n", "openshift-machine-api").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect((strings.Contains(machineControllerLog, ", trying to find category by name, it might take time") || strings.Contains(machineControllerLog, "found cached category id value")) && !strings.Contains(machineControllerLog, "unmarshal errors:")).To(o.BeTrue())
})
// author: [email protected]
g.It("Author:miyadav-Medium-29351-Use oc explain to see detailed documentation of the resources", func() {
_, err := oc.AdminAPIExtensionsV1Client().CustomResourceDefinitions().Get(context.TODO(), "machines.machine.openshift.io", metav1.GetOptions{})
if err != nil && apierrors.IsNotFound(err) {
g.Skip("The cluster does not have pre-requisite CRDs for the test")
}
if err != nil {
e2e.Failf("Failed to get CRD: %v", err)
}
resources := `machines.machine.openshift.io
machinesets.machine.openshift.io
machinehealthchecks.machine.openshift.io
machineautoscalers.autoscaling.openshift.io`
resource := strings.Split(resources, "\n")
for _, explained := range resource {
// Execute `oc explain resource` for each resource
explained, err := oc.AsAdmin().WithoutNamespace().Run("explain").Args(explained).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(explained).To(o.ContainSubstring("apiVersion"))
}
})
// author: [email protected]
g.It("Author:miyadav-NonHyperShiftHOST-High-76187-Add Paused condition to Machine and MachineSet resources", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure, clusterinfra.OpenStack, clusterinfra.VSphere, clusterinfra.AWS, clusterinfra.GCP)
featuregate, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("featuregate", "cluster", "-o=jsonpath={.spec}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if featuregate != "{}" {
if strings.Contains(featuregate, "TechPreviewNoUpgrade") {
g.Skip("This case is only suitable for non-techpreview cluster!")
} else if strings.Contains(featuregate, "CustomNoUpgrade") {
// Extract enabled features
enabledFeatures, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("featuregate", "cluster", "-o=jsonpath={.spec.customNoUpgrade.enabled}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if !strings.Contains(enabledFeatures, "MachineAPIMigration") {
g.Skip("Skipping test: MachineAPIMigration is not enabled in CustomNoUpgrade feature gate.")
}
g.By("Check if MachineAPIMigration enabled, project openshift-cluster-api exists")
project, err := oc.AsAdmin().WithoutNamespace().Run("project").Args(clusterAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(project, "Now using project \"openshift-cluster-api\" on server") {
machinesetAuthoritativeAPI, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, "-n", machineAPINamespace, "-o=jsonpath={.items[0].status.conditions[0]}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(machinesetAuthoritativeAPI, "\"AuthoritativeAPI is set to MachineAPI\"")).To(o.BeTrue())
}
}
} else {
g.Skip("This case is only suitable for non-techpreview cluster with Mapimigration enabled !")
}
})
})
| package clusterinfrastructure | ||||
test case | openshift/openshift-tests-private | 5b684f6a-cd25-4630-8936-107d78f3839c | Author:miyadav-NonHyperShiftHOST-High-37384-Machine API components should honour cluster wide proxy settings | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/misc_cases.go | g.It("Author:miyadav-NonHyperShiftHOST-High-37384-Machine API components should honour cluster wide proxy settings", func() {
g.By("Check if it's a proxy cluster")
httpProxy, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("proxy/cluster", "-o=jsonpath={.spec.httpProxy}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
httpsProxy, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("proxy/cluster", "-o=jsonpath={.spec.httpsProxy}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if len(httpProxy) == 0 && len(httpsProxy) == 0 {
g.Skip("Skip for non-proxy cluster!")
}
g.By("Check if machine-controller-pod is using cluster proxy")
machineControllerPod, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", "openshift-machine-api", "-l", "k8s-app=controller", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if len(machineControllerPod) == 0 {
g.Skip("Skip for no machine-api-controller pod in cluster")
} else {
envMapi, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", machineControllerPod, "-n", "openshift-machine-api", "-o=jsonpath={.spec.containers[0].env[0].name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if len(envMapi) == 0 {
e2e.Failf("jsonpath needs to be reviewed")
} else if strings.Compare(envMapi, "HTTP_PROXY") != 0 {
g.By("machine-api does not uses cluster proxy")
e2e.Failf("For more details refer - BZ 1896704")
}
}
}) | |||||
test case | openshift/openshift-tests-private | e5fe73ba-dab6-46e1-9cb5-7ec2e9524e46 | Author:huliu-Low-34718-Node labels and Affinity definition in PV should match | ['"path/filepath"', '"regexp"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/misc_cases.go | g.It("Author:huliu-Low-34718-Node labels and Affinity definition in PV should match", func() {
miscBaseDir := exutil.FixturePath("testdata", "clusterinfrastructure", "misc")
pvcTemplate := filepath.Join(miscBaseDir, "pvc34718.yaml")
podTemplate := filepath.Join(miscBaseDir, "pod34718.yaml")
pvc := pvcDescription{
storageSize: "1Gi",
template: pvcTemplate,
}
podName := "task-pv-pod"
pod := exutil.Pod{Name: podName, Namespace: "openshift-machine-api", Template: podTemplate, Parameters: []string{}}
storageclassExists, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("sc", "-o", "jsonpath={.items}").Output()
//If no storage class then items string is returned as []
if len(storageclassExists) < 3 {
g.Skip("Storage class not available by default")
}
g.By("Create pvc")
defer pvc.deletePvc(oc)
pvc.createPvc(oc)
g.By("Create pod")
defer pod.Delete(oc)
pod.Create(oc)
nodeName, _ := exutil.GetPodNodeName(oc, "openshift-machine-api", podName)
getNodeLabels, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeName, "-o=jsonpath={.metadata.labels}", "-n", "openshift-machine-api").Output()
describePv, _ := oc.AsAdmin().WithoutNamespace().Run("describe").Args("pv", "-n", "openshift-machine-api").Output()
if strings.Contains(getNodeLabels, `region":"`) && strings.Contains(describePv, "region in ") {
g.By("Check region info")
compileRegex := regexp.MustCompile(`region":"(.*?)"`)
matchArr := compileRegex.FindStringSubmatch(getNodeLabels)
region := matchArr[len(matchArr)-1]
if !strings.Contains(describePv, "region in ["+region+"]") {
e2e.Failf("Cannot get log region in [" + region + "]")
}
}
}) | |||||
test case | openshift/openshift-tests-private | 229dcb14-4951-45cd-bdfd-80afc6cf853a | Author:miyadav-NonHyperShiftHOST-High-60147-[clusterInfra] check machineapi and clusterautoscaler as optional operator | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/misc_cases.go | g.It("Author:miyadav-NonHyperShiftHOST-High-60147-[clusterInfra] check machineapi and clusterautoscaler as optional operator", func() {
g.By("Check capability shows operator is optional")
capability, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion", "version", "-o=jsonpath={.status.capabilities.enabledCapabilities}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
//This condition is for clusters installed with baseline capabilities set to NONE
if strings.Contains(capability, "MachineAPI") {
g.By("Check cluster-autoscaler has annotation to confirm optional status")
annotation, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co", "cluster-autoscaler", "-o=jsonpath={.metadata.annotations}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(annotation).To(o.ContainSubstring("\"capability.openshift.io/name\":\"MachineAPI\""))
g.By("Check control-plane-machine-set has annotation to confirm optional status")
annotation, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("co", "control-plane-machine-set", "-o=jsonpath={.metadata.annotations}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(annotation).To(o.ContainSubstring("\"capability.openshift.io/name\":\"MachineAPI\""))
g.By("Check machine-api has annotation to confirm optional status")
annotation, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("co", "machine-api", "-o=jsonpath={.metadata.annotations}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(annotation).To(o.ContainSubstring("\"capability.openshift.io/name\":\"MachineAPI\""))
} else {
g.Skip("MachineAPI not enabled so co machine-api/cluster-autoscaler wont be present to check annotations")
}
}) | |||||
test case | openshift/openshift-tests-private | e0704780-e670-40ec-99a1-328a339d53af | Author:miyadav-NonHyperShiftHOST-High-54053-Implement tag categories cache for MAPI vsphere provider [Disruptive] | ['"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"', 'apierrors "k8s.io/apimachinery/pkg/api/errors"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/misc_cases.go | g.It("Author:miyadav-NonHyperShiftHOST-High-54053-Implement tag categories cache for MAPI vsphere provider [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.VSphere)
g.By("Create a new machineset")
infrastructureName := clusterinfra.GetInfrastructureName(oc)
machinesetName := infrastructureName + "-54053"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Scale up machineset")
clusterinfra.ScaleMachineSet(oc, machinesetName, 1)
machineControllerPodName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", "openshift-machine-api", "-l", "api=clusterapi,k8s-app=controller", "-o=jsonpath={.items[0].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
machineControllerLog, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("pod/"+machineControllerPodName, "-c", "machine-controller", "-n", "openshift-machine-api").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect((strings.Contains(machineControllerLog, ", trying to find category by name, it might take time") || strings.Contains(machineControllerLog, "found cached category id value")) && !strings.Contains(machineControllerLog, "unmarshal errors:")).To(o.BeTrue())
}) | |||||
test case | openshift/openshift-tests-private | 67465f65-aafd-4963-a497-a6fb129df452 | Author:miyadav-Medium-29351-Use oc explain to see detailed documentation of the resources | ['"context"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/misc_cases.go | g.It("Author:miyadav-Medium-29351-Use oc explain to see detailed documentation of the resources", func() {
_, err := oc.AdminAPIExtensionsV1Client().CustomResourceDefinitions().Get(context.TODO(), "machines.machine.openshift.io", metav1.GetOptions{})
if err != nil && apierrors.IsNotFound(err) {
g.Skip("The cluster does not have pre-requisite CRDs for the test")
}
if err != nil {
e2e.Failf("Failed to get CRD: %v", err)
}
resources := `machines.machine.openshift.io
machinesets.machine.openshift.io
machinehealthchecks.machine.openshift.io
machineautoscalers.autoscaling.openshift.io`
resource := strings.Split(resources, "\n")
for _, explained := range resource {
// Execute `oc explain resource` for each resource
explained, err := oc.AsAdmin().WithoutNamespace().Run("explain").Args(explained).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(explained).To(o.ContainSubstring("apiVersion"))
}
}) | |||||
test case | openshift/openshift-tests-private | b2c18b2c-819a-4bee-92ef-403b9fe30c54 | Author:miyadav-NonHyperShiftHOST-High-76187-Add Paused condition to Machine and MachineSet resources | ['"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/misc_cases.go | g.It("Author:miyadav-NonHyperShiftHOST-High-76187-Add Paused condition to Machine and MachineSet resources", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure, clusterinfra.OpenStack, clusterinfra.VSphere, clusterinfra.AWS, clusterinfra.GCP)
featuregate, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("featuregate", "cluster", "-o=jsonpath={.spec}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if featuregate != "{}" {
if strings.Contains(featuregate, "TechPreviewNoUpgrade") {
g.Skip("This case is only suitable for non-techpreview cluster!")
} else if strings.Contains(featuregate, "CustomNoUpgrade") {
// Extract enabled features
enabledFeatures, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("featuregate", "cluster", "-o=jsonpath={.spec.customNoUpgrade.enabled}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if !strings.Contains(enabledFeatures, "MachineAPIMigration") {
g.Skip("Skipping test: MachineAPIMigration is not enabled in CustomNoUpgrade feature gate.")
}
g.By("Check if MachineAPIMigration enabled, project openshift-cluster-api exists")
project, err := oc.AsAdmin().WithoutNamespace().Run("project").Args(clusterAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(project, "Now using project \"openshift-cluster-api\" on server") {
machinesetAuthoritativeAPI, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, "-n", machineAPINamespace, "-o=jsonpath={.items[0].status.conditions[0]}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(machinesetAuthoritativeAPI, "\"AuthoritativeAPI is set to MachineAPI\"")).To(o.BeTrue())
}
}
} else {
g.Skip("This case is only suitable for non-techpreview cluster with Mapimigration enabled !")
}
}) | |||||
test | openshift/openshift-tests-private | c7599570-5f0b-465e-8fd9-82323b9df24c | cet | import (
"path/filepath"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
//e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/container_engine_tools/cet.go | package cet
import (
"path/filepath"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
//e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-node] Container_Engine_Tools crio,scc", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("node-"+getRandomString(), exutil.KubeConfigPath())
buildPruningBaseDir = exutil.FixturePath("testdata", "container_engine_tools")
customTemp = filepath.Join(buildPruningBaseDir, "pod-modify.yaml")
customctrcfgTemp = filepath.Join(buildPruningBaseDir, "containerRuntimeConfig.yaml")
ocp48876PodTemp = filepath.Join(buildPruningBaseDir, "ocp48876Pod.yaml")
podModify = podModifyDescription{
name: "",
namespace: "",
mountpath: "",
command: "",
args: "",
restartPolicy: "",
user: "",
role: "",
level: "",
template: customTemp,
}
ctrcfg = ctrcfgDescription{
name: "",
loglevel: "",
overlay: "",
logsizemax: "",
command: "",
configFile: "",
template: customctrcfgTemp,
}
newapp = newappDescription{
appname: "",
}
ocp48876Pod = ocp48876PodDescription{
name: "",
namespace: "",
template: ocp48876PodTemp,
}
)
// author: [email protected]
g.It("DEPRECATED-Author:pmali-Medium-13117-SeLinuxOptions in pod should apply to container correctly [Flaky]", func() {
oc.SetupProject()
podModify.name = "hello-pod"
podModify.namespace = oc.Namespace()
podModify.mountpath = "/init-test"
podModify.command = "/bin/bash"
podModify.args = "sleep 30"
podModify.restartPolicy = "Always"
podModify.user = "unconfined_u"
podModify.role = "unconfined_r"
podModify.level = "s0:c25,c968"
g.By("Create a pod with selinux options\n")
podModify.create(oc)
g.By("Check pod status\n")
err := podStatus(oc)
exutil.AssertWaitPollNoErr(err, "pod is not running")
g.By("Check Container SCC Status\n")
err = ContainerSccStatus(oc)
exutil.AssertWaitPollNoErr(err, "scc of pod has no unconfined_u unconfined_r s0:c25,c968")
g.By("Delete Pod\n")
podModify.delete(oc)
})
// author: [email protected]
g.It("Longduration-NonPreRelease-Author:pmali-Medium-22093-Medium-22094-CRIO configuration can be modified via containerruntimeconfig CRD[Disruptive][Slow]", func() {
oc.SetupProject()
ctrcfg.name = "parameter-testing"
ctrcfg.loglevel = "debug"
ctrcfg.overlay = "2G"
ctrcfg.logsizemax = "-1"
g.By("Create Container Runtime Config \n")
ctrcfg.create(oc)
defer cleanupObjectsClusterScope(oc, objectTableRefcscope{"ContainerRuntimeConfig", "parameter-testing"})
g.By("Verify that the settings were applied in CRI-O\n")
err := ctrcfg.checkCtrcfgParameters(oc)
exutil.AssertWaitPollNoErr(err, "cfg is not expected")
g.By("Delete Container Runtime Config \n")
cleanupObjectsClusterScope(oc, objectTableRefcscope{"ContainerRuntimeConfig", "parameter-testing"})
g.By("Make sure machineconfig containerruntime is deleted \n")
err = machineconfigStatus(oc)
exutil.AssertWaitPollNoErr(err, "mc has containerruntime")
g.By("Make sure All the Nodes are in the Ready State \n")
err = checkNodeStatus(oc)
exutil.AssertWaitPollNoErr(err, "node is not ready")
})
// author: [email protected]
g.It("DEPRECATED-Author:pmali-High-43086-nodejs s2i build failure: 'error reading blob from source image' should not occur.", func() {
oc.SetupProject()
newapp.appname = "openshift/nodejs~https://github.com/openshift/nodejs-ex.git"
g.By("Create New Node-js Application \n")
newapp.createNewApp(oc)
g.By("Check pod status\n")
err := podStatus(oc)
exutil.AssertWaitPollNoErr(err, "pod is not running")
buildconfig := buildConfigStatus(oc)
g.By("Build log should not contain error 'error reading blob from source image'\n")
err = buildLog(oc, buildconfig)
exutil.AssertWaitPollNoErr(err, "error reading blob from source image")
})
// author: [email protected]
g.It("Author:pmali-Medium-43102-os field in podman info output should not be empty", func() {
g.By("Check podman info status\n")
err := checkPodmanInfo(oc)
exutil.AssertWaitPollNoErr(err, "podman info is not expected")
})
// author: [email protected]
g.It("Author:pmali-DEPRECATED-High-43789-High-46278-Check podman and crictl version to check if bug fixed", func() {
g.By("Check podman and crictl version\n")
err := checkPodmanCrictlVersion(oc)
exutil.AssertWaitPollNoErr(err, "podman and crictl version are not expected")
})
// author: [email protected]
g.It("Author:pmali-LEVEL0-Longduration-NonPreRelease-High-37290-mco should cope with ContainerRuntimeConfig whose finalizer > 63 characters[Disruptive][Slow]", func() {
ctrcfg.name = "finalizer-test"
ctrcfg.loglevel = "debug"
ctrcfg.overlay = "2G"
ctrcfg.logsizemax = "-1"
g.By("Create Container Runtime Config \n")
ctrcfg.create(oc)
defer func() {
cleanupObjectsClusterScope(oc, objectTableRefcscope{"ContainerRuntimeConfig", "finalizer-test"})
err := getmcpStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "mcp is not updated")
}()
err := getmcpStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "mcp is not updated")
g.By("Verify that ContainerRuntimeConfig is successfully created without any error message\n")
err = ctrcfg.checkCtrcfgStatus(oc)
exutil.AssertWaitPollNoErr(err, "Config is failed")
})
// author: [email protected]
// author: [email protected]
g.It("ConnectedOnly-Author:pmali-Critical-48876-Check ping I src IP does work on a container", func() {
//azure platform can't support this case, skip if it's an azure cluster
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.GCP, clusterinfra.OpenStack, clusterinfra.VSphere, clusterinfra.BareMetal)
//cluster with proxy can't support this case
if checkProxy(oc) {
g.Skip("This is a proxy cluster, skip the test.")
}
oc.SetupProject()
ocp48876Pod.name = "hello-pod-ocp48876"
ocp48876Pod.namespace = oc.Namespace()
_, err := oc.AsAdmin().WithoutNamespace().Run("label").Args("namespace", ocp48876Pod.namespace, "security.openshift.io/scc.podSecurityLabelSync=false",
"pod-security.kubernetes.io/enforce=privileged", "--overwrite").Output()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create a pod \n")
ocp48876Pod.create(oc)
defer ocp48876Pod.delete(oc)
g.By("Check pod status\n")
err = podStatus(oc)
exutil.AssertWaitPollNoErr(err, "pod is not running")
g.By("Get Pod Name \n")
podName := getPodName(oc, oc.Namespace())
g.By("Get the pod IP address\n")
ipv4 := getPodIPv4(oc, podName, oc.Namespace())
g.By("Ping with IP address\n")
cmd := "ping -c 2 8.8.8.8 -I " + ipv4
err = pingIpaddr(oc, oc.Namespace(), podName, cmd)
exutil.AssertWaitPollNoErr(err, "Ping Unsuccessful with IP address")
g.By("Ping with Interface Name\n")
cmd = "ping -c 2 8.8.8.8 -I eth0"
err = pingIpaddr(oc, oc.Namespace(), podName, cmd)
exutil.AssertWaitPollNoErr(err, "Ping Unsuccessful with Interface")
})
})
| package cet | ||||
test case | openshift/openshift-tests-private | 1fda248f-f241-4825-8698-9c0b699b0359 | DEPRECATED-Author:pmali-Medium-13117-SeLinuxOptions in pod should apply to container correctly [Flaky] | github.com/openshift/openshift-tests-private/test/extended/container_engine_tools/cet.go | g.It("DEPRECATED-Author:pmali-Medium-13117-SeLinuxOptions in pod should apply to container correctly [Flaky]", func() {
oc.SetupProject()
podModify.name = "hello-pod"
podModify.namespace = oc.Namespace()
podModify.mountpath = "/init-test"
podModify.command = "/bin/bash"
podModify.args = "sleep 30"
podModify.restartPolicy = "Always"
podModify.user = "unconfined_u"
podModify.role = "unconfined_r"
podModify.level = "s0:c25,c968"
g.By("Create a pod with selinux options\n")
podModify.create(oc)
g.By("Check pod status\n")
err := podStatus(oc)
exutil.AssertWaitPollNoErr(err, "pod is not running")
g.By("Check Container SCC Status\n")
err = ContainerSccStatus(oc)
exutil.AssertWaitPollNoErr(err, "scc of pod has no unconfined_u unconfined_r s0:c25,c968")
g.By("Delete Pod\n")
podModify.delete(oc)
}) | ||||||
test case | openshift/openshift-tests-private | a33d52e9-0f89-41a1-b1af-38d0fb354de9 | Longduration-NonPreRelease-Author:pmali-Medium-22093-Medium-22094-CRIO configuration can be modified via containerruntimeconfig CRD[Disruptive][Slow] | github.com/openshift/openshift-tests-private/test/extended/container_engine_tools/cet.go | g.It("Longduration-NonPreRelease-Author:pmali-Medium-22093-Medium-22094-CRIO configuration can be modified via containerruntimeconfig CRD[Disruptive][Slow]", func() {
oc.SetupProject()
ctrcfg.name = "parameter-testing"
ctrcfg.loglevel = "debug"
ctrcfg.overlay = "2G"
ctrcfg.logsizemax = "-1"
g.By("Create Container Runtime Config \n")
ctrcfg.create(oc)
defer cleanupObjectsClusterScope(oc, objectTableRefcscope{"ContainerRuntimeConfig", "parameter-testing"})
g.By("Verify that the settings were applied in CRI-O\n")
err := ctrcfg.checkCtrcfgParameters(oc)
exutil.AssertWaitPollNoErr(err, "cfg is not expected")
g.By("Delete Container Runtime Config \n")
cleanupObjectsClusterScope(oc, objectTableRefcscope{"ContainerRuntimeConfig", "parameter-testing"})
g.By("Make sure machineconfig containerruntime is deleted \n")
err = machineconfigStatus(oc)
exutil.AssertWaitPollNoErr(err, "mc has containerruntime")
g.By("Make sure All the Nodes are in the Ready State \n")
err = checkNodeStatus(oc)
exutil.AssertWaitPollNoErr(err, "node is not ready")
}) | ||||||
test case | openshift/openshift-tests-private | 6b7e633f-a841-47d1-8139-44f1aec75e84 | DEPRECATED-Author:pmali-High-43086-nodejs s2i build failure: 'error reading blob from source image' should not occur. | github.com/openshift/openshift-tests-private/test/extended/container_engine_tools/cet.go | g.It("DEPRECATED-Author:pmali-High-43086-nodejs s2i build failure: 'error reading blob from source image' should not occur.", func() {
oc.SetupProject()
newapp.appname = "openshift/nodejs~https://github.com/openshift/nodejs-ex.git"
g.By("Create New Node-js Application \n")
newapp.createNewApp(oc)
g.By("Check pod status\n")
err := podStatus(oc)
exutil.AssertWaitPollNoErr(err, "pod is not running")
buildconfig := buildConfigStatus(oc)
g.By("Build log should not contain error 'error reading blob from source image'\n")
err = buildLog(oc, buildconfig)
exutil.AssertWaitPollNoErr(err, "error reading blob from source image")
}) | ||||||
test case | openshift/openshift-tests-private | 53c58769-9f54-4c79-a93a-b49d08d57c9c | Author:pmali-Medium-43102-os field in podman info output should not be empty | github.com/openshift/openshift-tests-private/test/extended/container_engine_tools/cet.go | g.It("Author:pmali-Medium-43102-os field in podman info output should not be empty", func() {
g.By("Check podman info status\n")
err := checkPodmanInfo(oc)
exutil.AssertWaitPollNoErr(err, "podman info is not expected")
}) | ||||||
test case | openshift/openshift-tests-private | c769d98e-585d-4af6-b674-bbcf6370a11b | Author:pmali-DEPRECATED-High-43789-High-46278-Check podman and crictl version to check if bug fixed | github.com/openshift/openshift-tests-private/test/extended/container_engine_tools/cet.go | g.It("Author:pmali-DEPRECATED-High-43789-High-46278-Check podman and crictl version to check if bug fixed", func() {
g.By("Check podman and crictl version\n")
err := checkPodmanCrictlVersion(oc)
exutil.AssertWaitPollNoErr(err, "podman and crictl version are not expected")
}) | ||||||
test case | openshift/openshift-tests-private | 733d91c3-033f-4bfa-82ec-8b1b123c6d6d | Author:pmali-LEVEL0-Longduration-NonPreRelease-High-37290-mco should cope with ContainerRuntimeConfig whose finalizer > 63 characters[Disruptive][Slow] | github.com/openshift/openshift-tests-private/test/extended/container_engine_tools/cet.go | g.It("Author:pmali-LEVEL0-Longduration-NonPreRelease-High-37290-mco should cope with ContainerRuntimeConfig whose finalizer > 63 characters[Disruptive][Slow]", func() {
ctrcfg.name = "finalizer-test"
ctrcfg.loglevel = "debug"
ctrcfg.overlay = "2G"
ctrcfg.logsizemax = "-1"
g.By("Create Container Runtime Config \n")
ctrcfg.create(oc)
defer func() {
cleanupObjectsClusterScope(oc, objectTableRefcscope{"ContainerRuntimeConfig", "finalizer-test"})
err := getmcpStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "mcp is not updated")
}()
err := getmcpStatus(oc, "worker")
exutil.AssertWaitPollNoErr(err, "mcp is not updated")
g.By("Verify that ContainerRuntimeConfig is successfully created without any error message\n")
err = ctrcfg.checkCtrcfgStatus(oc)
exutil.AssertWaitPollNoErr(err, "Config is failed")
}) | ||||||
test case | openshift/openshift-tests-private | 9b055569-a36d-4060-b9bb-46605ca5bcb3 | ConnectedOnly-Author:pmali-Critical-48876-Check ping I src IP does work on a container | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/container_engine_tools/cet.go | g.It("ConnectedOnly-Author:pmali-Critical-48876-Check ping I src IP does work on a container", func() {
//azure platform can't support this case, skip if it's an azure cluster
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.GCP, clusterinfra.OpenStack, clusterinfra.VSphere, clusterinfra.BareMetal)
//cluster with proxy can't support this case
if checkProxy(oc) {
g.Skip("This is a proxy cluster, skip the test.")
}
oc.SetupProject()
ocp48876Pod.name = "hello-pod-ocp48876"
ocp48876Pod.namespace = oc.Namespace()
_, err := oc.AsAdmin().WithoutNamespace().Run("label").Args("namespace", ocp48876Pod.namespace, "security.openshift.io/scc.podSecurityLabelSync=false",
"pod-security.kubernetes.io/enforce=privileged", "--overwrite").Output()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create a pod \n")
ocp48876Pod.create(oc)
defer ocp48876Pod.delete(oc)
g.By("Check pod status\n")
err = podStatus(oc)
exutil.AssertWaitPollNoErr(err, "pod is not running")
g.By("Get Pod Name \n")
podName := getPodName(oc, oc.Namespace())
g.By("Get the pod IP address\n")
ipv4 := getPodIPv4(oc, podName, oc.Namespace())
g.By("Ping with IP address\n")
cmd := "ping -c 2 8.8.8.8 -I " + ipv4
err = pingIpaddr(oc, oc.Namespace(), podName, cmd)
exutil.AssertWaitPollNoErr(err, "Ping Unsuccessful with IP address")
g.By("Ping with Interface Name\n")
cmd = "ping -c 2 8.8.8.8 -I eth0"
err = pingIpaddr(oc, oc.Namespace(), podName, cmd)
exutil.AssertWaitPollNoErr(err, "Ping Unsuccessful with Interface")
}) | |||||
file | openshift/openshift-tests-private | d5086c00-e30e-4487-929c-a098f50f6cc9 | cet_utils | import (
"fmt"
"math/rand"
"os/exec"
"strings"
"time"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/container_engine_tools/cet_utils.go | package cet
import (
"fmt"
"math/rand"
"os/exec"
"strings"
"time"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
type podModifyDescription struct {
name string
namespace string
mountpath string
command string
args string
restartPolicy string
user string
role string
level string
template string
}
type ctrcfgDescription struct {
namespace string
name string
pidlimit int
loglevel string
overlay string
logsizemax string
command string
configFile string
template string
}
type ocp48876PodDescription struct {
name string
namespace string
template string
}
type newappDescription struct {
appname string
}
type objectTableRefcscope struct {
kind string
name string
}
func getRandomString() string {
chars := "abcdefghijklmnopqrstuvwxyz0123456789"
seed := rand.New(rand.NewSource(time.Now().UnixNano()))
buffer := make([]byte, 8)
for index := range buffer {
buffer[index] = chars[seed.Intn(len(chars))]
}
return string(buffer)
}
func (ocp48876Pod *ocp48876PodDescription) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", ocp48876Pod.template, "-p", "NAME="+ocp48876Pod.name, "NAMESPACE="+ocp48876Pod.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (ocp48876Pod *ocp48876PodDescription) delete(oc *exutil.CLI) error {
return oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", ocp48876Pod.namespace, "pod", ocp48876Pod.name).Execute()
}
func (podModify *podModifyDescription) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", podModify.template, "-p", "NAME="+podModify.name, "NAMESPACE="+podModify.namespace, "MOUNTPATH="+podModify.mountpath, "COMMAND="+podModify.command, "ARGS="+podModify.args, "POLICY="+podModify.restartPolicy, "USER="+podModify.user, "ROLE="+podModify.role, "LEVEL="+podModify.level)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (podModify *podModifyDescription) delete(oc *exutil.CLI) error {
return oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", podModify.namespace, "pod", podModify.name).Execute()
}
func createResourceFromTemplate(oc *exutil.CLI, parameters ...string) error {
var jsonCfg string
err := wait.Poll(3*time.Second, 15*time.Second, func() (bool, error) {
output, err := oc.AsAdmin().Run("process").Args(parameters...).OutputToFile(getRandomString() + "node-config.json")
if err != nil {
e2e.Failf("the result of ReadFile:%v", err)
return false, nil
}
jsonCfg = output
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to process %v", parameters))
e2e.Logf("The resource is %s", jsonCfg)
return oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", jsonCfg).Execute()
}
func podStatusReason(oc *exutil.CLI) error {
e2e.Logf("check if pod is available")
return wait.Poll(5*time.Second, 3*time.Minute, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[*].status.initContainerStatuses[*].state.waiting.reason}", "-n", oc.Namespace()).Output()
e2e.Logf("the status of pod is %v", status)
if err != nil {
e2e.Failf("the result of ReadFile:%v", err)
return false, nil
}
if strings.Contains(status, "CrashLoopBackOff") {
e2e.Logf(" Pod failed status reason is :%s", status)
return true, nil
}
return false, nil
})
}
func podStatusterminatedReason(oc *exutil.CLI) error {
e2e.Logf("check if pod is available")
return wait.Poll(5*time.Second, 3*time.Minute, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[*].status.initContainerStatuses[*].state.terminated.reason}", "-n", oc.Namespace()).Output()
e2e.Logf("the status of pod is %v", status)
if err != nil {
e2e.Failf("the result of ReadFile:%v", err)
return false, nil
}
if strings.Contains(status, "Error") {
e2e.Logf(" Pod failed status reason is :%s", status)
return true, nil
}
return false, nil
})
}
func podStatus(oc *exutil.CLI) error {
e2e.Logf("check if pod is available")
return wait.Poll(5*time.Second, 3*time.Minute, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[*].status.phase}", "-n", oc.Namespace()).Output()
if err != nil {
e2e.Failf("the result of ReadFile:%v", err)
return false, nil
}
if strings.Contains(status, "Running") {
e2e.Logf("Pod status is : %s", status)
return true, nil
}
return false, nil
})
}
func volStatus(oc *exutil.CLI) error {
e2e.Logf("check content of volume")
return wait.Poll(1*time.Second, 1*time.Minute, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args("init-volume", "-c", "hello-pod", "cat", "/init-test/volume-test", "-n", oc.Namespace()).Output()
e2e.Logf("The content of the vol is %v", status)
if err != nil {
e2e.Failf("the result of ReadFile:%v", err)
return false, nil
}
if strings.Contains(status, "This is OCP volume test") {
e2e.Logf(" Init containers with volume work fine \n")
return true, nil
}
return false, nil
})
}
// ContainerSccStatus get container scc status
func ContainerSccStatus(oc *exutil.CLI) error {
return wait.Poll(1*time.Second, 1*time.Second, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "hello-pod", "-o=jsonpath={.spec.securityContext.seLinuxOptions.*}", "-n", oc.Namespace()).Output()
e2e.Logf("The Container SCC Content is %v", status)
if err != nil {
e2e.Failf("the result of ReadFile:%v", err)
return false, nil
}
if strings.Contains(status, "unconfined_u unconfined_r s0:c25,c968") {
e2e.Logf("SeLinuxOptions in pod applied to container Sucessfully \n")
return true, nil
}
return false, nil
})
}
func (ctrcfg *ctrcfgDescription) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", ctrcfg.template, "-p", "NAME="+ctrcfg.name, "LOGLEVEL="+ctrcfg.loglevel, "OVERLAY="+ctrcfg.overlay, "LOGSIZEMAX="+ctrcfg.logsizemax)
o.Expect(err).NotTo(o.HaveOccurred())
}
func cleanupObjectsClusterScope(oc *exutil.CLI, objs ...objectTableRefcscope) error {
return wait.Poll(1*time.Second, 1*time.Second, func() (bool, error) {
for _, v := range objs {
e2e.Logf("\n Start to remove: %v", v)
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(v.kind, v.name).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(status, "Error") {
e2e.Logf("Error getting resources... Seems resources objects are already deleted. \n")
return true, nil
}
_, err = oc.AsAdmin().WithoutNamespace().Run("delete").Args(v.kind, v.name).Output()
o.Expect(err).NotTo(o.HaveOccurred())
}
return true, nil
})
}
func (ctrcfg *ctrcfgDescription) checkCtrcfgParameters(oc *exutil.CLI) error {
return wait.Poll(3*time.Minute, 11*time.Minute, func() (bool, error) {
nodeName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "--selector=node-role.kubernetes.io/worker=", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\nNode Names are %v", nodeName)
node := strings.Fields(nodeName)
for _, v := range node {
nodeReadyBool, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", fmt.Sprintf("%s", v), "-o=jsonpath={.status.conditions[?(@.reason=='KubeletReady')].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The Node Ready status is %v", nodeReadyBool)
if nodeReadyBool == "True" {
criostatus, err := exutil.DebugNodeWithChroot(oc, fmt.Sprintf("%s", v), "crio", "config")
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf(`\nCRI-O PARAMETER ON THE WORKER NODE :` + fmt.Sprintf("%s", v))
wait.Poll(3*time.Minute, 10*time.Minute, func() (bool, error) {
if strings.Contains(string(criostatus), "pids_limit = 2048") && strings.Contains(string(criostatus), "log_level = \"debug\"") {
e2e.Logf("\nCtrcfg parameter pod limit and log_level configured successfully")
return true, nil
}
return false, nil
})
} else {
e2e.Logf("\n NODES ARE NOT READY\n ")
}
}
return true, nil
})
}
func buildLog(oc *exutil.CLI, buildconfig string) error {
return wait.Poll(30*time.Second, 5*time.Minute, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("buildconfig.build.openshift.io/"+buildconfig, "-n", oc.Namespace()).Output()
e2e.Logf("Here is the build log %v\n", status)
if err != nil {
e2e.Logf("the result of ReadFile:%v", err)
return false, nil
}
if strings.Contains(status, "error reading blob from source image") {
e2e.Logf(" This is Error, File Bug. \n")
return false, nil
}
return true, nil
})
}
func checkPodmanInfo(oc *exutil.CLI) error {
return wait.Poll(10*time.Second, 1*time.Minute, func() (bool, error) {
nodeName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\nNode Names are %v", nodeName)
node := strings.Fields(nodeName)
for _, v := range node {
nodeStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", fmt.Sprintf("%s", v), "-o=jsonpath={.status.conditions[3].type}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\nNode %s Status is %s\n", v, nodeStatus)
if nodeStatus == "Ready" {
podmaninfo, err := exutil.DebugNodeWithChroot(oc, fmt.Sprintf("%s", v), "podman", "info")
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf(`\nNODE NAME IS :` + fmt.Sprintf("%s", v))
o.Expect(podmaninfo).To(o.ContainSubstring("arch:"))
o.Expect(podmaninfo).To(o.ContainSubstring("os:"))
e2e.Logf("\nPodman info parameter arch and os configured successfully")
} else {
e2e.Logf("\n NODES ARE NOT READY\n ")
}
}
return true, nil
})
}
func (newapp *newappDescription) createNewApp(oc *exutil.CLI) error {
return wait.Poll(30*time.Second, 1*time.Minute, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("new-app").Args(newapp.appname, "-n", oc.Namespace()).Output()
e2e.Logf("Here is the newapp log %v\n", status)
if err != nil {
e2e.Logf("the result of ReadFile:%v", err)
return false, nil
}
return true, nil
})
}
func buildConfigStatus(oc *exutil.CLI) string {
var buildConfigStatus string
buildConfigStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("buildconfig", "-o=jsonpath={.items[0].metadata.name}", "-n", oc.Namespace()).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The buildconfig Name is: %v", buildConfigStatus)
return buildConfigStatus
}
func checkNodeStatus(oc *exutil.CLI) error {
return wait.Poll(10*time.Second, 1*time.Minute, func() (bool, error) {
nodeName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\nNode Names are %v", nodeName)
node := strings.Fields(nodeName)
for _, v := range node {
nodeReadyBool, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", fmt.Sprintf("%s", v), "-o=jsonpath={.status.conditions[?(@.reason=='KubeletReady')].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\nNode %s Ready Status is %s\n", v, nodeReadyBool)
if nodeReadyBool == "True" {
e2e.Logf("\n NODES ARE READY\n ")
} else {
e2e.Logf("\n NODES ARE NOT READY\n ")
}
}
return true, nil
})
}
func machineconfigStatus(oc *exutil.CLI) error {
return wait.Poll(30*time.Second, 5*time.Minute, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("machineconfig", "-o=jsonpath={.items[*].metadata.name}").Output()
e2e.Logf("Here is the machineconfig %v\n", status)
if err != nil {
e2e.Logf("the result of ReadFile:%v", err)
return false, nil
}
if strings.Contains(status, "containerruntime") {
e2e.Logf(" This is Error, File Bug. \n")
return false, nil
}
return true, nil
})
}
func checkPodmanCrictlVersion(oc *exutil.CLI) error {
return wait.Poll(1*time.Second, 10*time.Second, func() (bool, error) {
nodeName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\nNode Names are %v", nodeName)
node := strings.Fields(nodeName)
for _, v := range node {
nodeStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", fmt.Sprintf("%s", v), "-o=jsonpath={.status.conditions[?(@.type=='Ready')].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\nNode %s Status is %s\n", v, nodeStatus)
if nodeStatus == "True" {
podmanver, err := exutil.DebugNodeWithChroot(oc, fmt.Sprintf("%s", v), "podman", "--version")
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Podman version is:\n %v\n", podmanver)
crictlver, err1 := exutil.DebugNodeWithChroot(oc, fmt.Sprintf("%s", v), "crictl", "version")
o.Expect(err1).NotTo(o.HaveOccurred())
e2e.Logf("Crictl version is:\n %v\n", crictlver)
if strings.Contains(string(podmanver), "podman version 4.") && strings.Contains(string(crictlver), "RuntimeVersion: 1.2") {
e2e.Logf("\n Podman and crictl is on latest version")
return true, nil
} else {
e2e.Logf("\nPodman and crictl version are NOT Updated")
return false, nil
}
} else {
e2e.Logf("\n NODE IS NOT READY\n ")
}
}
return false, nil
})
}
func (ctrcfg *ctrcfgDescription) checkCtrcfgStatus(oc *exutil.CLI) error {
return wait.Poll(3*time.Second, 1*time.Minute, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ContainerRuntimeConfig", ctrcfg.name, "-o=jsonpath={.status.conditions[0].message}").Output()
e2e.Logf("The ContainerRuntimeConfig message is %v", status)
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(status, "Success") {
e2e.Logf("ContainerRuntimeConfig whose finalizer > 63 characters is applied Sucessfully \n")
return true, nil
}
return false, nil
})
}
func getPodName(oc *exutil.CLI, ns string) string {
podName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[0].metadata.name}", "-n", ns).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\nPod Name is is %v", podName)
return podName
}
func getPodIPv4(oc *exutil.CLI, podName string, ns string) string {
IPv4add, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", podName, "-o=jsonpath={.status.podIP}", "-n", ns).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\nPod IP address is %v", IPv4add)
return IPv4add
}
func pingIpaddr(oc *exutil.CLI, ns string, podName string, cmd string) error {
return wait.Poll(1*time.Second, 1*time.Second, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args("-n", ns, podName, "--", "/bin/bash", "-c", cmd).OutputToFile("pingipaddr.txt")
o.Expect(err).NotTo(o.HaveOccurred())
result, err1 := exec.Command("bash", "-c", "cat "+status+" | egrep '64 bytes from 8.8.8.8: icmp_seq'").Output()
if err1 != nil {
e2e.Failf("the result of ReadFile:%v", err1)
return false, nil
}
e2e.Logf("\nPing output is %s\n", result)
if strings.Contains(string(result), "64 bytes from 8.8.8.8: icmp_seq") {
e2e.Logf("\nPing Successful \n")
return true, nil
}
return false, nil
})
}
func checkProxy(oc *exutil.CLI) bool {
httpProxy, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("proxy", "cluster", "-o=jsonpath={.status.httpProxy}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
httpsProxy, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("proxy", "cluster", "-o=jsonpath={.status.httpsProxy}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if httpProxy != "" || httpsProxy != "" {
return true
}
return false
}
func getmcpStatus(oc *exutil.CLI, nodeSelector string) error {
return wait.Poll(10*time.Second, 15*time.Minute, func() (bool, error) {
mCount, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", nodeSelector, "-n", oc.Namespace(), "-o=jsonpath={.status.machineCount}").Output()
unmCount, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", nodeSelector, "-n", oc.Namespace(), "-o=jsonpath={.status.unavailableMachineCount}").Output()
dmCount, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", nodeSelector, "-n", oc.Namespace(), "-o=jsonpath={.status.degradedMachineCount}").Output()
rmCount, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", nodeSelector, "-n", oc.Namespace(), "-o=jsonpath={.status.readyMachineCount}").Output()
e2e.Logf("MachineCount:%v unavailableMachineCount:%v degradedMachineCount:%v ReadyMachineCount:%v", mCount, unmCount, dmCount, rmCount)
if strings.Compare(mCount, rmCount) == 0 && strings.Compare(unmCount, dmCount) == 0 {
return true, nil
}
return false, nil
})
}
| package cet | ||||
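A minimal, hypothetical sketch of how the cleanup and MachineConfigPool helpers in the file above are combined in the disruptive test cases earlier in this document; the resource name "example-ctrcfg" is a placeholder, and the snippet assumes oc and the customctrcfgTemp template path provided by the surrounding Ginkgo block.
// Hypothetical fragment inside a g.It block of this suite.
cfg := ctrcfgDescription{name: "example-ctrcfg", loglevel: "debug", overlay: "2G", logsizemax: "-1", template: customctrcfgTemp}
cfg.create(oc)
defer func() {
// Remove the CR, then wait until the worker pool reports all machines ready again.
cleanupObjectsClusterScope(oc, objectTableRefcscope{"ContainerRuntimeConfig", cfg.name})
exutil.AssertWaitPollNoErr(getmcpStatus(oc, "worker"), "mcp is not updated")
}()
// Wait for the config to roll out to the worker pool before asserting on it.
exutil.AssertWaitPollNoErr(getmcpStatus(oc, "worker"), "mcp is not updated")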
function | openshift/openshift-tests-private | cba3852d-5398-49ea-b742-f0845128a1a4 | getRandomString | ['"math/rand"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/container_engine_tools/cet_utils.go | func getRandomString() string {
chars := "abcdefghijklmnopqrstuvwxyz0123456789"
seed := rand.New(rand.NewSource(time.Now().UnixNano()))
buffer := make([]byte, 8)
for index := range buffer {
buffer[index] = chars[seed.Intn(len(chars))]
}
return string(buffer)
} | cet | ||||
function | openshift/openshift-tests-private | ee3ef7be-cf11-40ea-8b55-d4e1244fc6c4 | create | ['ocp48876PodDescription'] | github.com/openshift/openshift-tests-private/test/extended/container_engine_tools/cet_utils.go | func (ocp48876Pod *ocp48876PodDescription) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", ocp48876Pod.template, "-p", "NAME="+ocp48876Pod.name, "NAMESPACE="+ocp48876Pod.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
} | cet | ||||
function | openshift/openshift-tests-private | 318c173b-7d28-44f3-8dcb-c933e096e3e9 | delete | ['ocp48876PodDescription'] | github.com/openshift/openshift-tests-private/test/extended/container_engine_tools/cet_utils.go | func (ocp48876Pod *ocp48876PodDescription) delete(oc *exutil.CLI) error {
return oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", ocp48876Pod.namespace, "pod", ocp48876Pod.name).Execute()
} | cet | ||||
function | openshift/openshift-tests-private | a46f3093-8687-4d4c-adac-6a707280690f | create | ['podModifyDescription'] | github.com/openshift/openshift-tests-private/test/extended/container_engine_tools/cet_utils.go | func (podModify *podModifyDescription) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", podModify.template, "-p", "NAME="+podModify.name, "NAMESPACE="+podModify.namespace, "MOUNTPATH="+podModify.mountpath, "COMMAND="+podModify.command, "ARGS="+podModify.args, "POLICY="+podModify.restartPolicy, "USER="+podModify.user, "ROLE="+podModify.role, "LEVEL="+podModify.level)
o.Expect(err).NotTo(o.HaveOccurred())
} | cet | ||||
function | openshift/openshift-tests-private | 08552ced-a997-449f-bcfc-e98e449179dd | delete | ['podModifyDescription'] | github.com/openshift/openshift-tests-private/test/extended/container_engine_tools/cet_utils.go | func (podModify *podModifyDescription) delete(oc *exutil.CLI) error {
return oc.AsAdmin().WithoutNamespace().Run("delete").Args("-n", podModify.namespace, "pod", podModify.name).Execute()
} | cet | ||||
function | openshift/openshift-tests-private | 8f050dd0-26ba-4c25-8c90-4b937b2438ff | createResourceFromTemplate | ['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/container_engine_tools/cet_utils.go | func createResourceFromTemplate(oc *exutil.CLI, parameters ...string) error {
var jsonCfg string
err := wait.Poll(3*time.Second, 15*time.Second, func() (bool, error) {
output, err := oc.AsAdmin().Run("process").Args(parameters...).OutputToFile(getRandomString() + "node-config.json")
if err != nil {
e2e.Failf("the result of ReadFile:%v", err)
return false, nil
}
jsonCfg = output
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("fail to process %v", parameters))
e2e.Logf("The resource is %s", jsonCfg)
return oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", jsonCfg).Execute()
} | cet | ||||
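An illustrative, hypothetical call showing how createResourceFromTemplate is typically driven (as the typed create() helpers above do): the parameters are passed straight to oc process, the rendered JSON is written to a temp file, and that file is applied with oc create -f. The template path and parameter values are placeholders, and the snippet assumes the path/filepath import and an oc client from the surrounding test.
// Hypothetical usage, mirroring podModifyDescription.create above.
templatePath := filepath.Join(exutil.FixturePath("testdata", "container_engine_tools"), "pod-modify.yaml")
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", templatePath,
"-p", "NAME=hello-pod", "NAMESPACE="+oc.Namespace())
o.Expect(err).NotTo(o.HaveOccurred())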
function | openshift/openshift-tests-private | 5c5576c2-6ed0-4765-a9f2-45327d48d6db | podStatusReason | ['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/container_engine_tools/cet_utils.go | func podStatusReason(oc *exutil.CLI) error {
e2e.Logf("check if pod is available")
return wait.Poll(5*time.Second, 3*time.Minute, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[*].status.initContainerStatuses[*].state.waiting.reason}", "-n", oc.Namespace()).Output()
e2e.Logf("the status of pod is %v", status)
		if err != nil {
			e2e.Logf("failed to get the pod status, retrying: %v", err)
			return false, nil
		}
		if strings.Contains(status, "CrashLoopBackOff") {
			e2e.Logf("Pod failed status reason is: %s", status)
return true, nil
}
return false, nil
})
} | cet | ||||
function | openshift/openshift-tests-private | 846dee7e-59cb-4158-a793-79ef48037b88 | podStatusterminatedReason | ['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/container_engine_tools/cet_utils.go | func podStatusterminatedReason(oc *exutil.CLI) error {
e2e.Logf("check if pod is available")
return wait.Poll(5*time.Second, 3*time.Minute, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[*].status.initContainerStatuses[*].state.terminated.reason}", "-n", oc.Namespace()).Output()
e2e.Logf("the status of pod is %v", status)
		if err != nil {
			e2e.Logf("failed to get the pod status, retrying: %v", err)
			return false, nil
		}
		if strings.Contains(status, "Error") {
			e2e.Logf("Pod failed status reason is: %s", status)
return true, nil
}
return false, nil
})
} | cet | ||||
function | openshift/openshift-tests-private | d8db1d22-105d-493c-94d4-004088a9fa1d | podStatus | ['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/container_engine_tools/cet_utils.go | func podStatus(oc *exutil.CLI) error {
e2e.Logf("check if pod is available")
return wait.Poll(5*time.Second, 3*time.Minute, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[*].status.phase}", "-n", oc.Namespace()).Output()
		if err != nil {
			e2e.Logf("failed to get the pod status, retrying: %v", err)
			return false, nil
		}
if strings.Contains(status, "Running") {
e2e.Logf("Pod status is : %s", status)
return true, nil
}
return false, nil
})
} | cet | ||||
function | openshift/openshift-tests-private | f18c08aa-91d0-41df-acef-3b505a84c1d1 | volStatus | ['"os/exec"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/container_engine_tools/cet_utils.go | func volStatus(oc *exutil.CLI) error {
e2e.Logf("check content of volume")
return wait.Poll(1*time.Second, 1*time.Minute, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args("init-volume", "-c", "hello-pod", "cat", "/init-test/volume-test", "-n", oc.Namespace()).Output()
e2e.Logf("The content of the vol is %v", status)
		if err != nil {
			e2e.Logf("failed to read the volume content, retrying: %v", err)
			return false, nil
		}
if strings.Contains(status, "This is OCP volume test") {
e2e.Logf(" Init containers with volume work fine \n")
return true, nil
}
return false, nil
})
} | cet | ||||
function | openshift/openshift-tests-private | 89f20a15-57b8-4ca5-b503-2d1155c0d6cc | ContainerSccStatus | ['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/container_engine_tools/cet_utils.go | func ContainerSccStatus(oc *exutil.CLI) error {
return wait.Poll(1*time.Second, 1*time.Second, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "hello-pod", "-o=jsonpath={.spec.securityContext.seLinuxOptions.*}", "-n", oc.Namespace()).Output()
e2e.Logf("The Container SCC Content is %v", status)
		if err != nil {
			e2e.Logf("failed to get the pod securityContext, retrying: %v", err)
			return false, nil
		}
if strings.Contains(status, "unconfined_u unconfined_r s0:c25,c968") {
e2e.Logf("SeLinuxOptions in pod applied to container Sucessfully \n")
return true, nil
}
return false, nil
})
} | cet | ||||
function | openshift/openshift-tests-private | b8468d2c-1e9b-44fa-94c9-e08f3bfafb8a | create | ['ctrcfgDescription'] | github.com/openshift/openshift-tests-private/test/extended/container_engine_tools/cet_utils.go | func (ctrcfg *ctrcfgDescription) create(oc *exutil.CLI) {
err := createResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", ctrcfg.template, "-p", "NAME="+ctrcfg.name, "LOGLEVEL="+ctrcfg.loglevel, "OVERLAY="+ctrcfg.overlay, "LOGSIZEMAX="+ctrcfg.logsizemax)
o.Expect(err).NotTo(o.HaveOccurred())
} | cet | ||||
function | openshift/openshift-tests-private | cf889c76-e2d6-48bf-9fe5-5e9136ffd016 | cleanupObjectsClusterScope | ['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | ['objectTableRefcscope'] | github.com/openshift/openshift-tests-private/test/extended/container_engine_tools/cet_utils.go | func cleanupObjectsClusterScope(oc *exutil.CLI, objs ...objectTableRefcscope) error {
return wait.Poll(1*time.Second, 1*time.Second, func() (bool, error) {
for _, v := range objs {
e2e.Logf("\n Start to remove: %v", v)
			status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(v.kind, v.name).Output()
			if err != nil || strings.Contains(status, "Error") {
				e2e.Logf("Could not get the resource; it seems the object has already been deleted. \n")
				continue
			}
_, err = oc.AsAdmin().WithoutNamespace().Run("delete").Args(v.kind, v.name).Output()
o.Expect(err).NotTo(o.HaveOccurred())
}
return true, nil
})
} | cet | |||
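A hedged usage sketch, assuming objectTableRefcscope carries only the kind and name fields the loop above reads; the object names are illustrative.
	// Hypothetical example: register cluster-scoped objects for cleanup when the test exits.
	defer cleanupObjectsClusterScope(oc,
		objectTableRefcscope{kind: "ContainerRuntimeConfig", name: "example-ctrcfg"},
		objectTableRefcscope{kind: "MachineConfig", name: "example-machineconfig"},
	)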
function | openshift/openshift-tests-private | 4584089c-6d98-4efa-a4b9-aa7314ec61de | checkCtrcfgParameters | ['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | ['ctrcfgDescription'] | github.com/openshift/openshift-tests-private/test/extended/container_engine_tools/cet_utils.go | func (ctrcfg *ctrcfgDescription) checkCtrcfgParameters(oc *exutil.CLI) error {
return wait.Poll(3*time.Minute, 11*time.Minute, func() (bool, error) {
nodeName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "--selector=node-role.kubernetes.io/worker=", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\nNode Names are %v", nodeName)
node := strings.Fields(nodeName)
for _, v := range node {
nodeReadyBool, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", fmt.Sprintf("%s", v), "-o=jsonpath={.status.conditions[?(@.reason=='KubeletReady')].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The Node Ready status is %v", nodeReadyBool)
if nodeReadyBool == "True" {
criostatus, err := exutil.DebugNodeWithChroot(oc, fmt.Sprintf("%s", v), "crio", "config")
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf(`\nCRI-O PARAMETER ON THE WORKER NODE :` + fmt.Sprintf("%s", v))
wait.Poll(3*time.Minute, 10*time.Minute, func() (bool, error) {
if strings.Contains(string(criostatus), "pids_limit = 2048") && strings.Contains(string(criostatus), "log_level = \"debug\"") {
e2e.Logf("\nCtrcfg parameter pod limit and log_level configured successfully")
return true, nil
}
return false, nil
})
} else {
e2e.Logf("\n NODES ARE NOT READY\n ")
}
}
return true, nil
})
} | cet | |||
function | openshift/openshift-tests-private | 80556a55-281b-47c6-a5b2-36cf45bbd193 | buildLog | ['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/container_engine_tools/cet_utils.go | func buildLog(oc *exutil.CLI, buildconfig string) error {
return wait.Poll(30*time.Second, 5*time.Minute, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("buildconfig.build.openshift.io/"+buildconfig, "-n", oc.Namespace()).Output()
e2e.Logf("Here is the build log %v\n", status)
if err != nil {
e2e.Logf("the result of ReadFile:%v", err)
return false, nil
}
if strings.Contains(status, "error reading blob from source image") {
e2e.Logf(" This is Error, File Bug. \n")
return false, nil
}
return true, nil
})
} | cet | ||||
function | openshift/openshift-tests-private | 66bfff27-d369-45fe-9e35-c8520f4a4867 | checkPodmanInfo | ['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/container_engine_tools/cet_utils.go | func checkPodmanInfo(oc *exutil.CLI) error {
return wait.Poll(10*time.Second, 1*time.Minute, func() (bool, error) {
nodeName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\nNode Names are %v", nodeName)
node := strings.Fields(nodeName)
for _, v := range node {
			nodeStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", fmt.Sprintf("%s", v), "-o=jsonpath={.status.conditions[?(@.type=='Ready')].status}").Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			e2e.Logf("\nNode %s Ready status is %s\n", v, nodeStatus)
			if nodeStatus == "True" {
podmaninfo, err := exutil.DebugNodeWithChroot(oc, fmt.Sprintf("%s", v), "podman", "info")
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf(`\nNODE NAME IS :` + fmt.Sprintf("%s", v))
o.Expect(podmaninfo).To(o.ContainSubstring("arch:"))
o.Expect(podmaninfo).To(o.ContainSubstring("os:"))
e2e.Logf("\nPodman info parameter arch and os configured successfully")
} else {
e2e.Logf("\n NODES ARE NOT READY\n ")
}
}
return true, nil
})
} | cet | ||||
function | openshift/openshift-tests-private | 11cff26c-9b11-4b9c-9a4b-41efee30cc36 | createNewApp | ['"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | ['newappDescription'] | github.com/openshift/openshift-tests-private/test/extended/container_engine_tools/cet_utils.go | func (newapp *newappDescription) createNewApp(oc *exutil.CLI) error {
return wait.Poll(30*time.Second, 1*time.Minute, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("new-app").Args(newapp.appname, "-n", oc.Namespace()).Output()
e2e.Logf("Here is the newapp log %v\n", status)
if err != nil {
e2e.Logf("the result of ReadFile:%v", err)
return false, nil
}
return true, nil
})
} | cet | |||
function | openshift/openshift-tests-private | cc259982-a0d1-4542-b393-1c86224a3dc3 | buildConfigStatus | github.com/openshift/openshift-tests-private/test/extended/container_engine_tools/cet_utils.go | func buildConfigStatus(oc *exutil.CLI) string {
var buildConfigStatus string
buildConfigStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("buildconfig", "-o=jsonpath={.items[0].metadata.name}", "-n", oc.Namespace()).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("The buildconfig Name is: %v", buildConfigStatus)
return buildConfigStatus
} | cet | |||||
function | openshift/openshift-tests-private | 7dfd035a-9227-4db7-a8c4-37db8ca2d1a0 | checkNodeStatus | ['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/container_engine_tools/cet_utils.go | func checkNodeStatus(oc *exutil.CLI) error {
return wait.Poll(10*time.Second, 1*time.Minute, func() (bool, error) {
nodeName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\nNode Names are %v", nodeName)
node := strings.Fields(nodeName)
for _, v := range node {
nodeReadyBool, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", fmt.Sprintf("%s", v), "-o=jsonpath={.status.conditions[?(@.reason=='KubeletReady')].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\nNode %s Ready Status is %s\n", v, nodeReadyBool)
if nodeReadyBool == "True" {
e2e.Logf("\n NODES ARE READY\n ")
} else {
e2e.Logf("\n NODES ARE NOT READY\n ")
}
}
return true, nil
})
} | cet | ||||
function | openshift/openshift-tests-private | d323bdca-09d7-4772-98c9-9e7c6b82bcc5 | machineconfigStatus | ['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/container_engine_tools/cet_utils.go | func machineconfigStatus(oc *exutil.CLI) error {
return wait.Poll(30*time.Second, 5*time.Minute, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("machineconfig", "-o=jsonpath={.items[*].metadata.name}").Output()
e2e.Logf("Here is the machineconfig %v\n", status)
if err != nil {
e2e.Logf("the result of ReadFile:%v", err)
return false, nil
}
if strings.Contains(status, "containerruntime") {
e2e.Logf(" This is Error, File Bug. \n")
return false, nil
}
return true, nil
})
} | cet | ||||
function | openshift/openshift-tests-private | 3d18e2c1-615d-4c5f-b415-a6b2aebfc1ff | checkPodmanCrictlVersion | ['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/container_engine_tools/cet_utils.go | func checkPodmanCrictlVersion(oc *exutil.CLI) error {
return wait.Poll(1*time.Second, 10*time.Second, func() (bool, error) {
nodeName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\nNode Names are %v", nodeName)
node := strings.Fields(nodeName)
for _, v := range node {
nodeStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", fmt.Sprintf("%s", v), "-o=jsonpath={.status.conditions[?(@.type=='Ready')].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\nNode %s Status is %s\n", v, nodeStatus)
if nodeStatus == "True" {
podmanver, err := exutil.DebugNodeWithChroot(oc, fmt.Sprintf("%s", v), "podman", "--version")
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Podman version is:\n %v\n", podmanver)
crictlver, err1 := exutil.DebugNodeWithChroot(oc, fmt.Sprintf("%s", v), "crictl", "version")
o.Expect(err1).NotTo(o.HaveOccurred())
e2e.Logf("Crictl version is:\n %v\n", crictlver)
if strings.Contains(string(podmanver), "podman version 4.") && strings.Contains(string(crictlver), "RuntimeVersion: 1.2") {
e2e.Logf("\n Podman and crictl is on latest version")
return true, nil
} else {
e2e.Logf("\nPodman and crictl version are NOT Updated")
return false, nil
}
} else {
e2e.Logf("\n NODE IS NOT READY\n ")
}
}
return false, nil
})
} | cet | ||||
function | openshift/openshift-tests-private | 64feba4e-2130-458e-bba3-94e75cb8debc | checkCtrcfgStatus | ['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | ['ctrcfgDescription'] | github.com/openshift/openshift-tests-private/test/extended/container_engine_tools/cet_utils.go | func (ctrcfg *ctrcfgDescription) checkCtrcfgStatus(oc *exutil.CLI) error {
return wait.Poll(3*time.Second, 1*time.Minute, func() (bool, error) {
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ContainerRuntimeConfig", ctrcfg.name, "-o=jsonpath={.status.conditions[0].message}").Output()
e2e.Logf("The ContainerRuntimeConfig message is %v", status)
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(status, "Success") {
e2e.Logf("ContainerRuntimeConfig whose finalizer > 63 characters is applied Sucessfully \n")
return true, nil
}
return false, nil
})
} | cet | |||
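A hedged end-to-end sketch, assuming the ctrcfgDescription fields referenced by create and checkCtrcfgStatus above (name, template, loglevel, overlay, logsizemax); the fixture path and values are placeholders, not taken from the repository.
	// Hypothetical example: apply a ContainerRuntimeConfig template, then poll its status condition.
	ctrcfg := ctrcfgDescription{
		name:       "example-ctrcfg",
		template:   exutil.FixturePath("testdata", "containerengine", "containerRuntimeConfig.yaml"),
		loglevel:   "debug",
		overlay:    "2G",
		logsizemax: "-1",
	}
	defer cleanupObjectsClusterScope(oc, objectTableRefcscope{kind: "ContainerRuntimeConfig", name: ctrcfg.name})
	ctrcfg.create(oc)
	o.Expect(ctrcfg.checkCtrcfgStatus(oc)).NotTo(o.HaveOccurred())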
function | openshift/openshift-tests-private | eb086838-4235-4971-bf62-b13225763a77 | getPodName | github.com/openshift/openshift-tests-private/test/extended/container_engine_tools/cet_utils.go | func getPodName(oc *exutil.CLI, ns string) string {
podName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[0].metadata.name}", "-n", ns).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("\nPod Name is is %v", podName)
return podName
} | cet |
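A short, hedged sketch; the exec command is illustrative and only shows how getPodName can feed later oc calls.
	// Hypothetical example: look up the first pod in the test namespace and run a command in it.
	podName := getPodName(oc, oc.Namespace())
	out, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args(podName, "-n", oc.Namespace(), "--", "cat", "/etc/os-release").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	e2e.Logf("Pod %s reports: %s", podName, out)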