element_type | project_name | uuid | name | imports | structs | interfaces | file_location | code | global_vars | package | tags
---|---|---|---|---|---|---|---|---|---|---|---|
test | openshift/openshift-tests-private | 68fa723f-9dc9-42f4-a040-8e9ad8ccfea9 | mapi | import (
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/mapi.go | package clusterinfrastructure
import (
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-cluster-lifecycle] Cluster_Infrastructure MAPI", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("mapi-operator", exutil.KubeConfigPath())
)
// author: [email protected]
g.It("Author:miyadav-NonHyperShiftHOST-Medium-46078-Signal when mao no-op in the clusterOperator status conditions", func() {
g.By("watch the message from machine-api(mapi) clusteroperator ")
if clusterinfra.CheckPlatform(oc) == clusterinfra.None {
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co", "machine-api", "-o=jsonpath={.status.conditions}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).To(o.ContainSubstring("Cluster Machine API Operator is in NoOp mode"))
} else {
e2e.Logf("Only baremetal platform supported for the test")
g.Skip("We have to skip the test")
}
})
})
| package clusterinfrastructure | ||||
test case | openshift/openshift-tests-private | 5a2e8f85-61f7-47fa-a682-3ddce3a54623 | Author:miyadav-NonHyperShiftHOST-Medium-46078-Signal when mao no-op in the clusterOperator status conditions | ['"github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/mapi.go | g.It("Author:miyadav-NonHyperShiftHOST-Medium-46078-Signal when mao no-op in the clusterOperator status conditions", func() {
g.By("watch the message from machine-api(mapi) clusteroperator ")
if clusterinfra.CheckPlatform(oc) == clusterinfra.None {
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co", "machine-api", "-o=jsonpath={.status.conditions}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).To(o.ContainSubstring("Cluster Machine API Operator is in NoOp mode"))
} else {
e2e.Logf("Only baremetal platform supported for the test")
g.Skip("We have to skip the test")
}
}) | |||||
file | openshift/openshift-tests-private | da94a104-b586-48a9-a36f-ebd121af5fa7 | mhc_util | import (
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/mhc_util.go | package clusterinfrastructure
import (
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
type mhcDescription struct {
machinesetName string
machineRole string
clusterid string
namespace string
maxunhealthy string
name string
template string
}
func (mhc *mhcDescription) createMhc(oc *exutil.CLI) {
e2e.Logf("Creating machine health check ...")
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", mhc.template, "-p", "NAME="+mhc.name, "MAXUNHEALTHY="+mhc.maxunhealthy, "MACHINESET_NAME="+mhc.machinesetName, "MACHINE_ROLE="+mhc.machineRole, "CLUSTERID="+mhc.clusterid, "NAMESPACE="+machineAPINamespace)
if err != nil {
e2e.Logf("Machine health check creation failed: %v", err)
}
o.Expect(err).NotTo(o.HaveOccurred())
}
func (mhc *mhcDescription) deleteMhc(oc *exutil.CLI) error {
e2e.Logf("Deleting machinehealthcheck ...")
return oc.AsAdmin().WithoutNamespace().Run("delete").Args(mapiMHC, mhc.name, "-n", mhc.namespace).Execute()
}
| package clusterinfrastructure | ||||
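A minimal usage sketch for `mhcDescription` (the template path, machineset name, and cluster ID below are hypothetical placeholders, not values taken from this repository):

```go
// Sketch: typical create/defer-delete lifecycle of a machine health check in a test body.
// All parameter values here are illustrative placeholders.
func exampleMHCLifecycle(oc *exutil.CLI, mhcTemplate, machinesetName, clusterID string) {
	mhc := mhcDescription{
		name:           "example-mhc",
		namespace:      machineAPINamespace,
		machinesetName: machinesetName,
		machineRole:    "worker",
		clusterid:      clusterID,
		maxunhealthy:   "40%",
		template:       mhcTemplate,
	}
	defer mhc.deleteMhc(oc) // clean up even if a later assertion fails
	mhc.createMhc(oc)
}
```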
function | openshift/openshift-tests-private | 2610db74-a063-4c9b-8829-31f1e9bbce9e | createMhc | ['mhcDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/mhc_util.go | func (mhc *mhcDescription) createMhc(oc *exutil.CLI) {
e2e.Logf("Creating machine health check ...")
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", mhc.template, "-p", "NAME="+mhc.name, "MAXUNHEALTHY="+mhc.maxunhealthy, "MACHINESET_NAME="+mhc.machinesetName, "MACHINE_ROLE="+mhc.machineRole, "CLUSTERID="+mhc.clusterid, "NAMESPACE="+machineAPINamespace)
if err != nil {
e2e.Logf("Machine health check creation failed: %v", err)
}
o.Expect(err).NotTo(o.HaveOccurred())
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 0d03c693-19a3-4cd8-b9d4-090d9d9cc25c | deleteMhc | ['mhcDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/mhc_util.go | func (mhc *mhcDescription) deleteMhc(oc *exutil.CLI) error {
e2e.Logf("Deleting machinehealthcheck ...")
return oc.AsAdmin().WithoutNamespace().Run("delete").Args(mapiMHC, mhc.name, "-n", mhc.namespace).Execute()
} | clusterinfrastructure | ||||
file | openshift/openshift-tests-private | 3ad4262a-92df-4438-b39a-827b9ea64b7a | misc_util | import (
"fmt"
"strconv"
"time"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/misc_util.go | package clusterinfrastructure
import (
"fmt"
"strconv"
"time"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
type defaultMachinesetAzureDescription struct {
name string
namespace string
template string
clustername string
location string
vnet string
subnet string
networkResourceGroup string
}
type defaultMachinesetAwsDescription struct {
name string
namespace string
template string
clustername string
amiID string
availabilityZone string
sgName string
subnet string
iamInstanceProfileID string
}
type pvcDescription struct {
storageSize string
template string
}
type PodDisruptionBudget struct {
name string
namespace string
template string
label string
}
func (pvc *pvcDescription) createPvc(oc *exutil.CLI) {
e2e.Logf("Creating pvc ...")
exutil.CreateNsResourceFromTemplate(oc, "openshift-machine-api", "--ignore-unknown-parameters=true", "-f", pvc.template, "-p", "STORAGESIZE="+pvc.storageSize)
}
func (pvc *pvcDescription) deletePvc(oc *exutil.CLI) error {
e2e.Logf("Deleting pvc ...")
return oc.AsAdmin().WithoutNamespace().Run("delete").Args("pvc", "pvc-cloud", "-n", "openshift-machine-api").Execute()
}
func (defaultMachinesetAzure *defaultMachinesetAzureDescription) createDefaultMachineSetOnAzure(oc *exutil.CLI) {
e2e.Logf("Creating azureMachineSet ...")
if err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", defaultMachinesetAzure.template, "-p", "NAME="+defaultMachinesetAzure.name, "NAMESPACE="+machineAPINamespace, "CLUSTERNAME="+defaultMachinesetAzure.clustername, "LOCATION="+defaultMachinesetAzure.location, "VNET="+defaultMachinesetAzure.vnet, "SUBNET="+defaultMachinesetAzure.subnet, "NETWORKRG="+defaultMachinesetAzure.networkResourceGroup); err != nil {
defaultMachinesetAzure.deleteDefaultMachineSetOnAzure(oc)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
waitForDefaultMachinesRunning(oc, 1, defaultMachinesetAzure.name)
}
}
func (defaultMachinesetAws *defaultMachinesetAwsDescription) createDefaultMachineSetOnAws(oc *exutil.CLI) {
e2e.Logf("Creating awsMachineSet ...")
if err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", defaultMachinesetAws.template, "-p", "NAME="+defaultMachinesetAws.name, "NAMESPACE="+machineAPINamespace, "CLUSTERNAME="+defaultMachinesetAws.clustername, "AMIID="+defaultMachinesetAws.amiID, "AVAILABILITYZONE="+defaultMachinesetAws.availabilityZone, "SGNAME="+defaultMachinesetAws.sgName, "SUBNET="+defaultMachinesetAws.subnet, "IAMINSTANCEPROFILEID="+defaultMachinesetAws.iamInstanceProfileID); err != nil {
defaultMachinesetAws.deleteDefaultMachineSetOnAws(oc)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
waitForDefaultMachinesRunning(oc, 1, defaultMachinesetAws.name)
}
}
func (defaultMachinesetAzure *defaultMachinesetAzureDescription) deleteDefaultMachineSetOnAzure(oc *exutil.CLI) error {
e2e.Logf("Deleting azureMachineSet ...")
return oc.AsAdmin().WithoutNamespace().Run("delete").Args(mapiMachineset, defaultMachinesetAzure.name, "-n", machineAPINamespace).Execute()
}
func (defaultMachinesetAws *defaultMachinesetAwsDescription) deleteDefaultMachineSetOnAws(oc *exutil.CLI) error {
e2e.Logf("Deleting awsMachineSet ...")
return oc.AsAdmin().WithoutNamespace().Run("delete").Args(mapiMachineset, defaultMachinesetAws.name, "-n", machineAPINamespace).Execute()
}
// waitForDefaultMachinesRunning checks that all machines in a MachineSet are Running
func waitForDefaultMachinesRunning(oc *exutil.CLI, machineNumber int, machineSetName string) {
e2e.Logf("Waiting for the machines Running ...")
pollErr := wait.Poll(60*time.Second, 960*time.Second, func() (bool, error) {
msg, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, machineSetName, "-o=jsonpath={.status.readyReplicas}", "-n", machineAPINamespace).Output()
machinesRunning, _ := strconv.Atoi(msg)
if machinesRunning != machineNumber {
e2e.Logf("Expected %v machine are not Running yet and waiting up to 1 minutes ...", machineNumber)
return false, nil
}
e2e.Logf("Expected %v machines are Running", machineNumber)
return true, nil
})
exutil.AssertWaitPollNoErr(pollErr, fmt.Sprintf("Expected %v machines did not reach Running state within 16 minutes", machineNumber))
e2e.Logf("All machines are Running ...")
}
func (pdb *PodDisruptionBudget) createPDB(oc *exutil.CLI) {
e2e.Logf("Creating pod disruption budget: %s", pdb.name)
exutil.CreateNsResourceFromTemplate(oc, pdb.namespace, "--ignore-unknown-parameters=true", "-f", pdb.template, "-p", "NAME="+pdb.name, "LABEL="+pdb.label)
}
func (pdb *PodDisruptionBudget) deletePDB(oc *exutil.CLI) {
e2e.Logf("Deleting pod disruption budget: %s", pdb.name)
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("pdb", pdb.name, "-n", pdb.namespace, "--ignore-not-found=true").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
| package clusterinfrastructure | ||||
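A short sketch of the create/defer-delete pattern for `PodDisruptionBudget` (the template path and label value are hypothetical placeholders):

```go
// Sketch: create a PDB from a template and guarantee cleanup with defer.
// pdbTemplate and the label value are illustrative placeholders.
func examplePDBLifecycle(oc *exutil.CLI, pdbTemplate string) {
	pdb := PodDisruptionBudget{
		name:      "example-pdb",
		namespace: machineAPINamespace,
		template:  pdbTemplate,
		label:     "app=example",
	}
	defer pdb.deletePDB(oc) // deletePDB passes --ignore-not-found, so deferring early is safe
	pdb.createPDB(oc)
}
```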
function | openshift/openshift-tests-private | 93d7a8ab-c8dd-4223-bef0-dc36db6aaec9 | createPvc | ['pvcDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/misc_util.go | func (pvc *pvcDescription) createPvc(oc *exutil.CLI) {
e2e.Logf("Creating pvc ...")
exutil.CreateNsResourceFromTemplate(oc, "openshift-machine-api", "--ignore-unknown-parameters=true", "-f", pvc.template, "-p", "STORAGESIZE="+pvc.storageSize)
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | bf095c73-a4bd-4132-b161-0506160beb0b | deletePvc | ['pvcDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/misc_util.go | func (pvc *pvcDescription) deletePvc(oc *exutil.CLI) error {
e2e.Logf("Deleting pvc ...")
return oc.AsAdmin().WithoutNamespace().Run("delete").Args("pvc", "pvc-cloud", "-n", "openshift-machine-api").Execute()
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 1d6e42e9-626c-4865-88f0-18e30ac5496d | createDefaultMachineSetOnAzure | ['defaultMachinesetAzureDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/misc_util.go | func (defaultMachinesetAzure *defaultMachinesetAzureDescription) createDefaultMachineSetOnAzure(oc *exutil.CLI) {
e2e.Logf("Creating azureMachineSet ...")
if err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", defaultMachinesetAzure.template, "-p", "NAME="+defaultMachinesetAzure.name, "NAMESPACE="+machineAPINamespace, "CLUSTERNAME="+defaultMachinesetAzure.clustername, "LOCATION="+defaultMachinesetAzure.location, "VNET="+defaultMachinesetAzure.vnet, "SUBNET="+defaultMachinesetAzure.subnet, "NETWORKRG="+defaultMachinesetAzure.networkResourceGroup); err != nil {
defaultMachinesetAzure.deleteDefaultMachineSetOnAzure(oc)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
waitForDefaultMachinesRunning(oc, 1, defaultMachinesetAzure.name)
}
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | c0b3dc5b-052e-4bea-b066-f666b69d3aa8 | createDefaultMachineSetOnAws | ['defaultMachinesetAwsDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/misc_util.go | func (defaultMachinesetAws *defaultMachinesetAwsDescription) createDefaultMachineSetOnAws(oc *exutil.CLI) {
e2e.Logf("Creating awsMachineSet ...")
if err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", defaultMachinesetAws.template, "-p", "NAME="+defaultMachinesetAws.name, "NAMESPACE="+machineAPINamespace, "CLUSTERNAME="+defaultMachinesetAws.clustername, "AMIID="+defaultMachinesetAws.amiID, "AVAILABILITYZONE="+defaultMachinesetAws.availabilityZone, "SGNAME="+defaultMachinesetAws.sgName, "SUBNET="+defaultMachinesetAws.subnet, "IAMINSTANCEPROFILEID="+defaultMachinesetAws.iamInstanceProfileID); err != nil {
defaultMachinesetAws.deleteDefaultMachineSetOnAws(oc)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
waitForDefaultMachinesRunning(oc, 1, defaultMachinesetAws.name)
}
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | e7d4307b-09c7-4183-9d43-8ca7bdd1859e | deleteDefaultMachineSetOnAzure | ['defaultMachinesetAzureDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/misc_util.go | func (defaultMachinesetAzure *defaultMachinesetAzureDescription) deleteDefaultMachineSetOnAzure(oc *exutil.CLI) error {
e2e.Logf("Deleting azureMachineSet ...")
return oc.AsAdmin().WithoutNamespace().Run("delete").Args(mapiMachineset, defaultMachinesetAzure.name, "-n", machineAPINamespace).Execute()
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | c8dc1371-79ab-4409-bd73-1e5d0fa2d661 | deleteDefaultMachineSetOnAws | ['defaultMachinesetAwsDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/misc_util.go | func (defaultMachinesetAws *defaultMachinesetAwsDescription) deleteDefaultMachineSetOnAws(oc *exutil.CLI) error {
e2e.Logf("Deleting awsMachineSet ...")
return oc.AsAdmin().WithoutNamespace().Run("delete").Args(mapiMachineset, defaultMachinesetAws.name, "-n", machineAPINamespace).Execute()
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 5fca03e4-482c-4a05-a5bb-1977f902df81 | waitForDefaultMachinesRunning | ['"fmt"', '"strconv"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/misc_util.go | func waitForDefaultMachinesRunning(oc *exutil.CLI, machineNumber int, machineSetName string) {
e2e.Logf("Waiting for the machines Running ...")
pollErr := wait.Poll(60*time.Second, 960*time.Second, func() (bool, error) {
msg, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, machineSetName, "-o=jsonpath={.status.readyReplicas}", "-n", machineAPINamespace).Output()
machinesRunning, _ := strconv.Atoi(msg)
if machinesRunning != machineNumber {
e2e.Logf("Expected %v machine are not Running yet and waiting up to 1 minutes ...", machineNumber)
return false, nil
}
e2e.Logf("Expected %v machines are Running", machineNumber)
return true, nil
})
exutil.AssertWaitPollNoErr(pollErr, fmt.Sprintf("Expected %v machines did not reach Running state within 16 minutes", machineNumber))
e2e.Logf("All machines are Running ...")
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | d5f798b8-6b42-4b27-95b9-6ad074429f93 | createPDB | ['PodDisruptionBudget'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/misc_util.go | func (pdb *PodDisruptionBudget) createPDB(oc *exutil.CLI) {
e2e.Logf("Creating pod disruption budget: %s", pdb.name)
exutil.CreateNsResourceFromTemplate(oc, pdb.namespace, "--ignore-unknown-parameters=true", "-f", pdb.template, "-p", "NAME="+pdb.name, "LABEL="+pdb.label)
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 14cf7e93-4375-4e35-b015-2206df385f2c | deletePDB | ['PodDisruptionBudget'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/misc_util.go | func (pdb *PodDisruptionBudget) deletePDB(oc *exutil.CLI) {
e2e.Logf("Deleting pod disruption budget: %s", pdb.name)
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("pdb", pdb.name, "-n", pdb.namespace, "--ignore-not-found=true").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
} | clusterinfrastructure | ||||
file | openshift/openshift-tests-private | 961eb307-8383-4244-aca1-f9bbc42e2221 | util | import (
"math/rand"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/util.go | package clusterinfrastructure
import (
"math/rand"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
const (
machineAPINamespace = "openshift-machine-api"
clusterAPINamespace = "openshift-cluster-api"
machineApproverNamespace = "openshift-cluster-machine-approver"
mapiMachineset = "machinesets.machine.openshift.io"
mapiMachine = "machines.machine.openshift.io"
mapiMHC = "machinehealthchecks.machine.openshift.io"
capiMachineset = "machinesets.cluster.x-k8s.io"
capiMachine = "machines.cluster.x-k8s.io"
defaultTimeout = 300 * time.Second
)
func applyResourceFromTemplate(oc *exutil.CLI, parameters ...string) error {
var jsonCfg string
err := wait.Poll(3*time.Second, 15*time.Second, func() (bool, error) {
output, err := oc.AsAdmin().Run("process").Args(parameters...).OutputToFile(getRandomString() + "cloud.json")
if err != nil {
e2e.Failf("the result of ReadFile:%v", err)
return false, nil
}
jsonCfg = output
return true, nil
})
exutil.AssertWaitPollNoErr(err, "Applying resources from template is failed")
e2e.Logf("The resource is %s", jsonCfg)
return oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", jsonCfg).Execute()
}
func getRandomString() string {
chars := "abcdefghijklmnopqrstuvwxyz0123456789"
seed := rand.New(rand.NewSource(time.Now().UnixNano()))
buffer := make([]byte, 8)
for index := range buffer {
buffer[index] = chars[seed.Intn(len(chars))]
}
return string(buffer)
}
func skipTestIfSpotWorkers(oc *exutil.CLI) {
machines, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-o=jsonpath={.items[*].metadata.name}", "-n", machineAPINamespace, "-l", "machine.openshift.io/interruptible-instance=").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if machines != "" {
g.Skip("This case cannot be tested using spot instance!")
}
}
// Get the cluster history versions
func getClusterHistoryVersions(oc *exutil.CLI) string {
historyVersions, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion", "version", "-o=jsonpath={.status.history[*].version}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Cluster history versions are %s", historyVersions)
return historyVersions
}
// To be used if sensitive data is present in template
func applyResourceFromTemplateWithoutInfo(oc *exutil.CLI, parameters ...string) error {
var jsonCfg string
err := wait.Poll(3*time.Second, 15*time.Second, func() (bool, error) {
output, err := oc.AsAdmin().NotShowInfo().Run("process").Args(parameters...).OutputToFile(getRandomString() + "cloud.json")
if err != nil {
e2e.Failf("the result of ReadFile:%v", err)
return false, nil
}
jsonCfg = output
return true, nil
})
exutil.AssertWaitPollNoErr(err, "Applying resources from template is failed")
e2e.Logf("The resource is %s", jsonCfg)
return oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", jsonCfg).Execute()
}
func getClusterRegion(oc *exutil.CLI) string {
region, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", `-ojsonpath={.items[].metadata.labels.topology\.kubernetes\.io/region}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
return region
}
| package clusterinfrastructure | ||||
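A minimal sketch of how `applyResourceFromTemplate` is typically invoked (the template path and NAME parameter are hypothetical placeholders):

```go
// Sketch: process an OpenShift template with parameters and apply the rendered JSON.
// templatePath and the NAME value are illustrative placeholders.
func exampleApplyTemplate(oc *exutil.CLI, templatePath string) {
	err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true",
		"-f", templatePath,
		"-p", "NAME=example-"+getRandomString(), "NAMESPACE="+machineAPINamespace)
	o.Expect(err).NotTo(o.HaveOccurred())
}
```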
function | openshift/openshift-tests-private | bd55c4e1-2270-4ec3-a0e7-eb672a76501a | applyResourceFromTemplate | ['"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/util.go | func applyResourceFromTemplate(oc *exutil.CLI, parameters ...string) error {
var jsonCfg string
err := wait.Poll(3*time.Second, 15*time.Second, func() (bool, error) {
output, err := oc.AsAdmin().Run("process").Args(parameters...).OutputToFile(getRandomString() + "cloud.json")
if err != nil {
e2e.Failf("the result of ReadFile:%v", err)
return false, nil
}
jsonCfg = output
return true, nil
})
exutil.AssertWaitPollNoErr(err, "Applying resources from template is failed")
e2e.Logf("The resource is %s", jsonCfg)
return oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", jsonCfg).Execute()
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 6ce28e17-3aa5-426c-85eb-9a7b01146cdc | getRandomString | ['"math/rand"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/util.go | func getRandomString() string {
chars := "abcdefghijklmnopqrstuvwxyz0123456789"
seed := rand.New(rand.NewSource(time.Now().UnixNano()))
buffer := make([]byte, 8)
for index := range buffer {
buffer[index] = chars[seed.Intn(len(chars))]
}
return string(buffer)
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 06639b59-407f-4619-b651-9a7f2cca570e | skipTestIfSpotWorkers | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/util.go | func skipTestIfSpotWorkers(oc *exutil.CLI) {
machines, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-o=jsonpath={.items[*].metadata.name}", "-n", machineAPINamespace, "-l", "machine.openshift.io/interruptible-instance=").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if machines != "" {
g.Skip("This case cannot be tested using spot instance!")
}
} | clusterinfrastructure | |||||
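A usage sketch for `skipTestIfSpotWorkers`, assuming it is called at the top of a spec inside one of the Describe blocks above where `oc` is defined (the spec title is a placeholder):

```go
// Sketch: guard a case that would be unreliable on spot/interruptible workers.
g.It("example spec that requires on-demand workers", func() {
	skipTestIfSpotWorkers(oc)
	// ... the rest of the test body runs only when no spot workers are present ...
})
```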
function | openshift/openshift-tests-private | 102c20aa-2074-4e74-b1ee-b600ee703225 | getClusterHistoryVersions | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/util.go | func getClusterHistoryVersions(oc *exutil.CLI) string {
historyVersions, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion", "version", "-o=jsonpath={.status.history[*].version}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Cluster history versions are %s", historyVersions)
return historyVersions
} | clusterinfrastructure | |||||
function | openshift/openshift-tests-private | 82a8fa53-09ba-49c9-9f00-c7588e25bbbd | applyResourceFromTemplateWithoutInfo | ['"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/util.go | func applyResourceFromTemplateWithoutInfo(oc *exutil.CLI, parameters ...string) error {
var jsonCfg string
err := wait.Poll(3*time.Second, 15*time.Second, func() (bool, error) {
output, err := oc.AsAdmin().NotShowInfo().Run("process").Args(parameters...).OutputToFile(getRandomString() + "cloud.json")
if err != nil {
e2e.Failf("the result of ReadFile:%v", err)
return false, nil
}
jsonCfg = output
return true, nil
})
exutil.AssertWaitPollNoErr(err, "Applying resources from template is failed")
e2e.Logf("The resource is %s", jsonCfg)
return oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", jsonCfg).Execute()
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 5b5dafbb-237f-4e37-bb79-ec81918af58f | getClusterRegion | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/util.go | func getClusterRegion(oc *exutil.CLI) string {
region, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", `-ojsonpath={.items[].metadata.labels.topology\.kubernetes\.io/region}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
return region
} | clusterinfrastructure | |||||
test | openshift/openshift-tests-private | 9eeea81d-b417-4091-9896-916950327d64 | cma | import (
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/cma.go | package clusterinfrastructure
import (
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-cluster-lifecycle] Cluster_Infrastructure CMA", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLIForKubeOpenShift("cluster-machine-approver" + getRandomString())
)
g.BeforeEach(func() {
exutil.SkipForSNOCluster(oc)
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Medium-45420-Cluster Machine Approver should use leader election [Disruptive]", func() {
attemptAcquireLeaderLeaseStr := "attempting to acquire leader lease openshift-cluster-machine-approver/cluster-machine-approver-leader..."
acquiredLeaseStr := "successfully acquired lease openshift-cluster-machine-approver/cluster-machine-approver-leader"
g.By("Check default pod is leader")
podName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[0].metadata.name}", "-l", "app=machine-approver", "-n", "openshift-cluster-machine-approver").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if len(podName) == 0 {
g.Skip("Skip for no pod!")
}
logsOfPod, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args(podName, "-n", "openshift-cluster-machine-approver", "-c", "machine-approver-controller").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(logsOfPod).To(o.ContainSubstring(attemptAcquireLeaderLeaseStr))
o.Expect(logsOfPod).To(o.ContainSubstring(acquiredLeaseStr))
g.By("Delete the default pod")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", podName, "-n", "openshift-cluster-machine-approver").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Wait for new pod ready")
err = wait.Poll(3*time.Second, 60*time.Second, func() (bool, error) {
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment", "machine-approver", "-o=jsonpath={.status.availableReplicas}", "-n", "openshift-cluster-machine-approver").Output()
readyReplicas, _ := strconv.Atoi(output)
if readyReplicas != 1 {
e2e.Logf("The new pod is not ready yet and waiting up to 3 seconds ...")
return false, nil
}
e2e.Logf("The new pod is ready")
return true, nil
})
exutil.AssertWaitPollNoErr(err, "The new pod is not ready after 1 minute")
g.By("Check new pod is leader")
newPodName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[0].metadata.name}", "-l", "app=machine-approver", "-n", "openshift-cluster-machine-approver").Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = wait.Poll(3*time.Second, 60*time.Second, func() (bool, error) {
logsOfPod, _ = oc.AsAdmin().WithoutNamespace().Run("logs").Args(newPodName, "-n", "openshift-cluster-machine-approver", "-c", "machine-approver-controller").Output()
if !strings.Contains(logsOfPod, attemptAcquireLeaderLeaseStr) || !strings.Contains(logsOfPod, acquiredLeaseStr) {
e2e.Logf("The new pod has not acquired the lease yet; retrying in 3 seconds ...")
return false, nil
}
e2e.Logf("The new pod has acquired the lease")
return true, nil
})
exutil.AssertWaitPollNoErr(err, "The new pod did not acquire the lease within 1 minute")
})
// author: [email protected]
g.It("Author:zhsun-Medium-64165-Bootstrap kubelet client cert should include system:serviceaccounts group", func() {
csrs, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("csr", "-o=jsonpath={.items[*].metadata.name}", "--field-selector", "spec.signerName=kubernetes.io/kube-apiserver-client-kubelet").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if csrs != "" {
csrList := strings.Split(csrs, " ")
for _, csr := range csrList {
csrGroups, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("csr", csr, "-o=jsonpath={.spec.groups}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(csrGroups, "\"system:serviceaccounts\",\"system:serviceaccounts:openshift-machine-config-operator\",\"system:authenticated\"")).To(o.BeTrue())
}
}
})
// author: [email protected]
g.It("Author:miyadav-NonHyperShiftHOST-Critical-69189-Cluster machine approver metrics should only be available via https", func() {
podName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[0].metadata.name}", "-l", "app=machine-approver", "-n", "openshift-cluster-machine-approver").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if len(podName) == 0 {
g.Skip("Skip for no pod!")
}
urlHTTP := "http://127.0.0.0:9191/metrics"
urlHTTPS := "https://127.0.0.0:9192/metrics"
curlOutputHTTP, _ := oc.AsAdmin().WithoutNamespace().Run("exec").Args(podName, "-n", "openshift-cluster-machine-approver", "-i", "--", "curl", urlHTTP).Output()
o.Expect(curlOutputHTTP).To(o.ContainSubstring("Connection refused"))
curlOutputHTTPS, _ := oc.AsAdmin().WithoutNamespace().Run("exec").Args(podName, "-n", "openshift-cluster-machine-approver", "-i", "--", "curl", urlHTTPS).Output()
o.Expect(curlOutputHTTPS).To(o.ContainSubstring("SSL certificate problem"))
})
// author: [email protected]
g.It("Author:zhsun-HyperShiftMGMT-Medium-45695-MachineApprover is usable with CAPI for guest cluster", func() {
exutil.By("Check disable-status-controller should be in guest cluster machine-approver")
guestClusterName, guestClusterKube, hostedClusterNS := exutil.ValidHypershiftAndGetGuestKubeConf(oc)
maGrgs, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment", "machine-approver", "-o=jsonpath={.spec.template.spec.containers[0].args}", "-n", hostedClusterNS+"-"+guestClusterName).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(maGrgs).Should(o.ContainSubstring("disable-status-controller"))
o.Expect(maGrgs).Should(o.ContainSubstring("apigroup=cluster.x-k8s.io"))
o.Expect(maGrgs).Should(o.ContainSubstring("workload-cluster-kubeconfig=/etc/kubernetes/kubeconfig/kubeconfig"))
exutil.By("Check CO machine-approver is disabled")
checkCO, err := oc.AsAdmin().SetGuestKubeconf(guestClusterKube).AsGuestKubeconf().Run("get").Args("co").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(checkCO).ShouldNot(o.ContainSubstring("machine-approver"))
})
})
| package clusterinfrastructure | ||||
test case | openshift/openshift-tests-private | 89d1a79a-309f-492b-be52-c84ce0e9b218 | Author:huliu-NonHyperShiftHOST-Medium-45420-Cluster Machine Approver should use leader election [Disruptive] | ['"strconv"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/cma.go | g.It("Author:huliu-NonHyperShiftHOST-Medium-45420-Cluster Machine Approver should use leader election [Disruptive]", func() {
attemptAcquireLeaderLeaseStr := "attempting to acquire leader lease openshift-cluster-machine-approver/cluster-machine-approver-leader..."
acquiredLeaseStr := "successfully acquired lease openshift-cluster-machine-approver/cluster-machine-approver-leader"
g.By("Check default pod is leader")
podName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[0].metadata.name}", "-l", "app=machine-approver", "-n", "openshift-cluster-machine-approver").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if len(podName) == 0 {
g.Skip("Skip for no pod!")
}
logsOfPod, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args(podName, "-n", "openshift-cluster-machine-approver", "-c", "machine-approver-controller").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(logsOfPod).To(o.ContainSubstring(attemptAcquireLeaderLeaseStr))
o.Expect(logsOfPod).To(o.ContainSubstring(acquiredLeaseStr))
g.By("Delete the default pod")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", podName, "-n", "openshift-cluster-machine-approver").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Wait for new pod ready")
err = wait.Poll(3*time.Second, 60*time.Second, func() (bool, error) {
output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment", "machine-approver", "-o=jsonpath={.status.availableReplicas}", "-n", "openshift-cluster-machine-approver").Output()
readyReplicas, _ := strconv.Atoi(output)
if readyReplicas != 1 {
e2e.Logf("The new pod is not ready yet and waiting up to 3 seconds ...")
return false, nil
}
e2e.Logf("The new pod is ready")
return true, nil
})
exutil.AssertWaitPollNoErr(err, "The new pod is not ready after 1 minute")
g.By("Check new pod is leader")
newPodName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[0].metadata.name}", "-l", "app=machine-approver", "-n", "openshift-cluster-machine-approver").Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = wait.Poll(3*time.Second, 60*time.Second, func() (bool, error) {
logsOfPod, _ = oc.AsAdmin().WithoutNamespace().Run("logs").Args(newPodName, "-n", "openshift-cluster-machine-approver", "-c", "machine-approver-controller").Output()
if !strings.Contains(logsOfPod, attemptAcquireLeaderLeaseStr) || !strings.Contains(logsOfPod, acquiredLeaseStr) {
e2e.Logf("The new pod has not acquired the lease yet; retrying in 3 seconds ...")
return false, nil
}
e2e.Logf("The new pod has acquired the lease")
return true, nil
})
exutil.AssertWaitPollNoErr(err, "The new pod did not acquire the lease within 1 minute")
}) | |||||
test case | openshift/openshift-tests-private | d23ccc83-2d5f-4b2a-b8d4-47efa2bbf988 | Author:zhsun-Medium-64165-Bootstrap kubelet client cert should include system:serviceaccounts group | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/cma.go | g.It("Author:zhsun-Medium-64165-Bootstrap kubelet client cert should include system:serviceaccounts group", func() {
csrs, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("csr", "-o=jsonpath={.items[*].metadata.name}", "--field-selector", "spec.signerName=kubernetes.io/kube-apiserver-client-kubelet").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if csrs != "" {
csrList := strings.Split(csrs, " ")
for _, csr := range csrList {
csrGroups, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("csr", csr, "-o=jsonpath={.spec.groups}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(csrGroups, "\"system:serviceaccounts\",\"system:serviceaccounts:openshift-machine-config-operator\",\"system:authenticated\"")).To(o.BeTrue())
}
}
}) | |||||
test case | openshift/openshift-tests-private | 8a8dafe1-a4d9-4955-8f39-f4386858b160 | Author:miyadav-NonHyperShiftHOST-Critical-69189-Cluster machine approver metrics should only be available via https | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/cma.go | g.It("Author:miyadav-NonHyperShiftHOST-Critical-69189-Cluster machine approver metrics should only be available via https", func() {
podName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[0].metadata.name}", "-l", "app=machine-approver", "-n", "openshift-cluster-machine-approver").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if len(podName) == 0 {
g.Skip("Skip for no pod!")
}
urlHTTP := "http://127.0.0.0:9191/metrics"
urlHTTPS := "https://127.0.0.0:9192/metrics"
curlOutputHTTP, _ := oc.AsAdmin().WithoutNamespace().Run("exec").Args(podName, "-n", "openshift-cluster-machine-approver", "-i", "--", "curl", urlHTTP).Output()
o.Expect(curlOutputHTTP).To(o.ContainSubstring("Connection refused"))
curlOutputHTTPS, _ := oc.AsAdmin().WithoutNamespace().Run("exec").Args(podName, "-n", "openshift-cluster-machine-approver", "-i", "--", "curl", urlHTTPS).Output()
o.Expect(curlOutputHTTPS).To(o.ContainSubstring("SSL certificate problem"))
}) | ||||||
test case | openshift/openshift-tests-private | 0e760e12-3aeb-4275-9d83-bf3028d0a637 | Author:zhsun-HyperShiftMGMT-Medium-45695-MachineApprover is usable with CAPI for guest cluster | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/cma.go | g.It("Author:zhsun-HyperShiftMGMT-Medium-45695-MachineApprover is usable with CAPI for guest cluster", func() {
exutil.By("Check disable-status-controller should be in guest cluster machine-approver")
guestClusterName, guestClusterKube, hostedClusterNS := exutil.ValidHypershiftAndGetGuestKubeConf(oc)
maGrgs, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment", "machine-approver", "-o=jsonpath={.spec.template.spec.containers[0].args}", "-n", hostedClusterNS+"-"+guestClusterName).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(maGrgs).Should(o.ContainSubstring("disable-status-controller"))
o.Expect(maGrgs).Should(o.ContainSubstring("apigroup=cluster.x-k8s.io"))
o.Expect(maGrgs).Should(o.ContainSubstring("workload-cluster-kubeconfig=/etc/kubernetes/kubeconfig/kubeconfig"))
exutil.By("Check CO machine-approver is disabled")
checkCO, err := oc.AsAdmin().SetGuestKubeconf(guestClusterKube).AsGuestKubeconf().Run("get").Args("co").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(checkCO).ShouldNot(o.ContainSubstring("machine-approver"))
}) | ||||||
test | openshift/openshift-tests-private | efc9f85c-67d5-4844-9f43-662055c685ae | upgrade | import (
"path/filepath"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/upgrade.go | package clusterinfrastructure
import (
"path/filepath"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-cluster-lifecycle] Cluster_Infrastructure Upgrade", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("cluster-infrastructure-upgrade", exutil.KubeConfigPath())
iaasPlatform clusterinfra.PlatformType
)
g.BeforeEach(func() {
exutil.SkipForSNOCluster(oc)
iaasPlatform = clusterinfra.CheckPlatform(oc)
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-PreChkUpgrade-Medium-41804-Spot/preemptible instances should not block upgrade - azure [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure)
randomMachinesetName := clusterinfra.GetRandomMachineSetName(oc)
region, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", "openshift-machine-api", "-o=jsonpath={.spec.template.spec.providerSpec.value.location}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if region == "northcentralus" || region == "westus" || region == "usgovvirginia" {
g.Skip("Skip this test scenario because it is not supported on the " + region + " region, because this region doesn't have zones")
}
g.By("Create a spot instance on azure")
infrastructureName := clusterinfra.GetInfrastructureName(oc)
machinesetName := infrastructureName + "-41804"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
ms.CreateMachineSet(oc)
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"spotVMOptions":{}}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check machine and node were labelled `interruptible-instance`")
machine, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", machineAPINamespace, "-l", "machine.openshift.io/interruptible-instance=").Output()
o.Expect(machine).NotTo(o.BeEmpty())
node, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-l", "machine.openshift.io/interruptible-instance=").Output()
o.Expect(node).NotTo(o.BeEmpty())
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-PstChkUpgrade-Medium-41804-Spot/preemptible instances should not block upgrade - azure [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure)
randomMachinesetName := clusterinfra.GetRandomMachineSetName(oc)
region, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", "openshift-machine-api", "-o=jsonpath={.spec.template.spec.providerSpec.value.location}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if region == "northcentralus" || region == "westus" || region == "usgovvirginia" {
g.Skip("Skip this test scenario because it is not supported on the " + region + " region, because this region doesn't have zones")
}
infrastructureName := clusterinfra.GetInfrastructureName(oc)
machinesetName := infrastructureName + "-41804"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
g.By("Check machine and node were still be labelled `interruptible-instance`")
machine, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", machineAPINamespace, "-l", "machine.openshift.io/interruptible-instance=").Output()
o.Expect(machine).NotTo(o.BeEmpty())
node, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-l", "machine.openshift.io/interruptible-instance=").Output()
o.Expect(node).NotTo(o.BeEmpty())
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-NonPreRelease-PstChkUpgrade-Medium-61086-Enable IMDSv2 on existing worker machines via machine set [Disruptive][Slow]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
historyVersions := getClusterHistoryVersions(oc)
if strings.Contains(historyVersions, "4.6") {
g.Skip("Skipping this case due to IMDSv2 is only supported on AWS clusters that were created with version 4.7 or later")
}
g.By("Create a new machineset")
infrastructureName := clusterinfra.GetInfrastructureName(oc)
machinesetName := infrastructureName + "-61086"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Update machineset with imds required")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"metadataServiceOptions":{"authentication":"Required"}}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", machineAPINamespace, "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.metadataServiceOptions.authentication}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).Should(o.ContainSubstring("Required"))
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-NonPreRelease-PstChkUpgrade-Medium-62265-Ensure controlplanemachineset is generated automatically after upgrade", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP, clusterinfra.Nutanix, clusterinfra.OpenStack)
cpmsOut, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-n", machineAPINamespace).Output()
e2e.Logf("cpmsOut:%s", cpmsOut)
o.Expect(err).NotTo(o.HaveOccurred())
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-NonPreRelease-PstChkUpgrade-LEVEL0-Critical-22612-Cluster could scale up/down after upgrade [Disruptive][Slow]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP, clusterinfra.VSphere, clusterinfra.IBMCloud, clusterinfra.AlibabaCloud, clusterinfra.Nutanix, clusterinfra.OpenStack)
g.By("Create a new machineset")
infrastructureName := clusterinfra.GetInfrastructureName(oc)
machinesetName := infrastructureName + "-22612"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Scale up machineset")
clusterinfra.ScaleMachineSet(oc, machinesetName, 1)
g.By("Scale down machineset")
clusterinfra.ScaleMachineSet(oc, machinesetName, 0)
})
// author: [email protected]
g.It("Author:zhsun-NonPreRelease-PstChkUpgrade-LEVEL0-Critical-70626-Service of type LoadBalancer can be created successful after upgrade", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP, clusterinfra.IBMCloud, clusterinfra.AlibabaCloud)
if iaasPlatform == clusterinfra.AWS && strings.HasPrefix(getClusterRegion(oc), "us-iso") {
g.Skip("Skipped: There is no public subnet on AWS C2S/SC2S disconnected clusters!")
}
ccmBaseDir := exutil.FixturePath("testdata", "clusterinfrastructure", "ccm")
loadBalancer := filepath.Join(ccmBaseDir, "svc-loadbalancer.yaml")
loadBalancerService := loadBalancerServiceDescription{
template: loadBalancer,
name: "svc-loadbalancer-70626",
namespace: oc.Namespace(),
}
g.By("Create loadBalancerService")
defer loadBalancerService.deleteLoadBalancerService(oc)
loadBalancerService.createLoadBalancerService(oc)
g.By("Check External-IP assigned")
getLBSvcIP(oc, loadBalancerService)
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-PreChkUpgrade-High-72031-Instances with custom DHCP option set should not block upgrade - AWS [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
g.By("Create a new dhcpOptions")
var newDhcpOptionsID, currentDhcpOptionsID string
clusterinfra.GetAwsCredentialFromCluster(oc)
awsClient := exutil.InitAwsSession()
newDhcpOptionsID, err := awsClient.CreateDhcpOptionsWithDomainName("example72031.com")
if err != nil {
g.Skip("The credential is insufficient to perform create dhcpOptions operation, skip the cases!!")
}
g.By("Associate the VPC with the new dhcpOptionsId")
machineName := clusterinfra.ListMasterMachineNames(oc)[0]
instanceID, err := awsClient.GetAwsInstanceID(machineName)
o.Expect(err).NotTo(o.HaveOccurred())
vpcID, err := awsClient.GetAwsInstanceVPCId(instanceID)
o.Expect(err).NotTo(o.HaveOccurred())
currentDhcpOptionsID, err = awsClient.GetDhcpOptionsIDOfVpc(vpcID)
o.Expect(err).NotTo(o.HaveOccurred())
err = awsClient.AssociateDhcpOptions(vpcID, newDhcpOptionsID)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create a new machineset")
infrastructureName := clusterinfra.GetInfrastructureName(oc)
machinesetName := infrastructureName + "-72031"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 1}
ms.CreateMachineSet(oc)
// Add a special tag to the original DHCP options set so that we can find it in the PstChkUpgrade case
err = awsClient.CreateTag(currentDhcpOptionsID, infrastructureName, "previousdhcp72031")
o.Expect(err).NotTo(o.HaveOccurred())
machineNameOfMachineSet := clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)[0]
nodeName := clusterinfra.GetNodeNameFromMachine(oc, machineNameOfMachineSet)
readyStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeName, "-o=jsonpath={.status.conditions[?(@.type==\"Ready\")].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(readyStatus).Should(o.Equal("True"))
internalDNS, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, machineNameOfMachineSet, "-o=jsonpath={.status.addresses[?(@.type==\"InternalDNS\")].address}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(internalDNS, "example72031.com")).To(o.BeTrue())
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-PstChkUpgrade-High-72031-Instances with custom DHCP option set should not block upgrade - AWs [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
infrastructureName := clusterinfra.GetInfrastructureName(oc)
machinesetName := infrastructureName + "-72031"
machineset, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace).Output()
if strings.Contains(machineset, "not found") {
g.Skip("The machineset " + machinesetName + " is not created before upgrade, skip this case!")
}
clusterinfra.GetAwsCredentialFromCluster(oc)
awsClient := exutil.InitAwsSession()
machineName := clusterinfra.ListMasterMachineNames(oc)[0]
instanceID, err := awsClient.GetAwsInstanceID(machineName)
o.Expect(err).NotTo(o.HaveOccurred())
vpcID, err := awsClient.GetAwsInstanceVPCId(instanceID)
o.Expect(err).NotTo(o.HaveOccurred())
newDhcpOptionsID, err := awsClient.GetDhcpOptionsIDOfVpc(vpcID)
o.Expect(err).NotTo(o.HaveOccurred())
previousDhcpOptionsID, err := awsClient.GetDhcpOptionsIDFromTag(infrastructureName, "previousdhcp72031")
e2e.Logf("previousDhcpOptionsID:" + strings.Join(previousDhcpOptionsID, "*"))
o.Expect(err).NotTo(o.HaveOccurred())
defer func(dhcpOptionsID []string) {
if len(dhcpOptionsID) > 0 {
e2e.Logf("previousDhcpOptionsID[0]:" + dhcpOptionsID[0])
} else {
e2e.Fail("there is no previousDhcpOptionsID")
}
err := awsClient.DeleteTag(dhcpOptionsID[0], infrastructureName, "previousdhcp72031")
o.Expect(err).NotTo(o.HaveOccurred())
err = awsClient.AssociateDhcpOptions(vpcID, dhcpOptionsID[0])
o.Expect(err).NotTo(o.HaveOccurred())
err = awsClient.DeleteDhcpOptions(newDhcpOptionsID)
o.Expect(err).NotTo(o.HaveOccurred())
}(previousDhcpOptionsID)
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
g.By("Check machine is still Running and node is still Ready")
phase, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", machineAPINamespace, "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].status.phase}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(phase).Should(o.Equal("Running"))
nodeName := clusterinfra.GetNodeNameFromMachine(oc, clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)[0])
readyStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeName, "-o=jsonpath={.status.conditions[?(@.type==\"Ready\")].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(readyStatus).Should(o.Equal("True"))
})
})
| package clusterinfrastructure | ||||
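Several of the upgrade cases above share the same machineset workflow: create with 0 replicas, patch the providerSpec, scale to 1, and wait for Running. A condensed sketch of that recurring pattern, using the same helpers as the tests above (the "-example" suffix and the spotVMOptions patch body are illustrative placeholders):

```go
// Sketch of the recurring create → patch providerSpec → scale → wait pattern.
// The "-example" suffix and spotVMOptions patch are placeholders for illustration.
func exampleMachineSetPatchFlow(oc *exutil.CLI) {
	infrastructureName := clusterinfra.GetInfrastructureName(oc)
	machinesetName := infrastructureName + "-example"
	ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
	defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
	defer ms.DeleteMachineSet(oc)
	ms.CreateMachineSet(oc)
	err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName,
		"-n", machineAPINamespace, "-p",
		`{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"spotVMOptions":{}}}}}}}`,
		"--type=merge").Execute()
	o.Expect(err).NotTo(o.HaveOccurred())
	clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
}
```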
test case | openshift/openshift-tests-private | a846bf06-2de1-4e8c-82e3-fdd0785fff8d | Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-PreChkUpgrade-Medium-41804-Spot/preemptible instances should not block upgrade - azure [Disruptive] | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/upgrade.go | g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-PreChkUpgrade-Medium-41804-Spot/preemptible instances should not block upgrade - azure [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure)
randomMachinesetName := clusterinfra.GetRandomMachineSetName(oc)
region, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", "openshift-machine-api", "-o=jsonpath={.spec.template.spec.providerSpec.value.location}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if region == "northcentralus" || region == "westus" || region == "usgovvirginia" {
g.Skip("Skip this test scenario because it is not supported on the " + region + " region, because this region doesn't have zones")
}
g.By("Create a spot instance on azure")
infrastructureName := clusterinfra.GetInfrastructureName(oc)
machinesetName := infrastructureName + "-41804"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
ms.CreateMachineSet(oc)
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"spotVMOptions":{}}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
g.By("Check machine and node were labelled `interruptible-instance`")
machine, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", machineAPINamespace, "-l", "machine.openshift.io/interruptible-instance=").Output()
o.Expect(machine).NotTo(o.BeEmpty())
node, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-l", "machine.openshift.io/interruptible-instance=").Output()
o.Expect(node).NotTo(o.BeEmpty())
}) | |||||
test case | openshift/openshift-tests-private | 998d723d-6a69-42f1-a810-b98228ef27da | Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-PstChkUpgrade-Medium-41804-Spot/preemptible instances should not block upgrade - azure [Disruptive] | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/upgrade.go | g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-PstChkUpgrade-Medium-41804-Spot/preemptible instances should not block upgrade - azure [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure)
randomMachinesetName := clusterinfra.GetRandomMachineSetName(oc)
region, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", "openshift-machine-api", "-o=jsonpath={.spec.template.spec.providerSpec.value.location}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if region == "northcentralus" || region == "westus" || region == "usgovvirginia" {
g.Skip("Skip this test scenario because it is not supported on the " + region + " region, because this region doesn't have zones")
}
infrastructureName := clusterinfra.GetInfrastructureName(oc)
machinesetName := infrastructureName + "-41804"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
g.By("Check machine and node were still be labelled `interruptible-instance`")
machine, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", machineAPINamespace, "-l", "machine.openshift.io/interruptible-instance=").Output()
o.Expect(machine).NotTo(o.BeEmpty())
node, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-l", "machine.openshift.io/interruptible-instance=").Output()
o.Expect(node).NotTo(o.BeEmpty())
}) | |||||
test case | openshift/openshift-tests-private | e3892d45-f0b3-4758-b72a-efeba63ef7fb | Author:zhsun-NonHyperShiftHOST-NonPreRelease-PstChkUpgrade-Medium-61086-Enable IMDSv2 on existing worker machines via machine set [Disruptive][Slow] | ['"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/upgrade.go | g.It("Author:zhsun-NonHyperShiftHOST-NonPreRelease-PstChkUpgrade-Medium-61086-Enable IMDSv2 on existing worker machines via machine set [Disruptive][Slow]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
historyVersions := getClusterHistoryVersions(oc)
if strings.Contains(historyVersions, "4.6") {
g.Skip("Skipping this case due to IMDSv2 is only supported on AWS clusters that were created with version 4.7 or later")
}
g.By("Create a new machineset")
infrastructureName := clusterinfra.GetInfrastructureName(oc)
machinesetName := infrastructureName + "-61086"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Update machineset with imds required")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace, "-p", `{"spec":{"replicas":1,"template":{"spec":{"providerSpec":{"value":{"metadataServiceOptions":{"authentication":"Required"}}}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 1, machinesetName)
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", machineAPINamespace, "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].spec.providerSpec.value.metadataServiceOptions.authentication}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).Should(o.ContainSubstring("Required"))
}) | |||||
test case | openshift/openshift-tests-private | 48e51b42-742d-475a-bf45-4fb85da3ca92 | Author:huliu-NonHyperShiftHOST-NonPreRelease-PstChkUpgrade-Medium-62265-Ensure controlplanemachineset is generated automatically after upgrade | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/upgrade.go | g.It("Author:huliu-NonHyperShiftHOST-NonPreRelease-PstChkUpgrade-Medium-62265-Ensure controlplanemachineset is generated automatically after upgrade", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP, clusterinfra.Nutanix, clusterinfra.OpenStack)
cpmsOut, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-n", machineAPINamespace).Output()
e2e.Logf("cpmsOut:%s", cpmsOut)
o.Expect(err).NotTo(o.HaveOccurred())
}) | |||||
test case | openshift/openshift-tests-private | cf6aec62-3f6f-408f-b8f7-b12a53329f67 | Author:zhsun-NonHyperShiftHOST-NonPreRelease-PstChkUpgrade-LEVEL0-Critical-22612-Cluster could scale up/down after upgrade [Disruptive][Slow] | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/upgrade.go | g.It("Author:zhsun-NonHyperShiftHOST-NonPreRelease-PstChkUpgrade-LEVEL0-Critical-22612-Cluster could scale up/down after upgrade [Disruptive][Slow]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP, clusterinfra.VSphere, clusterinfra.IBMCloud, clusterinfra.AlibabaCloud, clusterinfra.Nutanix, clusterinfra.OpenStack)
g.By("Create a new machineset")
infrastructureName := clusterinfra.GetInfrastructureName(oc)
machinesetName := infrastructureName + "-22612"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
g.By("Scale up machineset")
clusterinfra.ScaleMachineSet(oc, machinesetName, 1)
g.By("Scale down machineset")
clusterinfra.ScaleMachineSet(oc, machinesetName, 0)
}) | |||||
test case | openshift/openshift-tests-private | 097390b9-e833-4098-9462-ce19269b015d | Author:zhsun-NonPreRelease-PstChkUpgrade-LEVEL0-Critical-70626-Service of type LoadBalancer can be created successful after upgrade | ['"path/filepath"', '"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/upgrade.go | g.It("Author:zhsun-NonPreRelease-PstChkUpgrade-LEVEL0-Critical-70626-Service of type LoadBalancer can be created successful after upgrade", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP, clusterinfra.IBMCloud, clusterinfra.AlibabaCloud)
if iaasPlatform == clusterinfra.AWS && strings.HasPrefix(getClusterRegion(oc), "us-iso") {
g.Skip("Skipped: There is no public subnet on AWS C2S/SC2S disconnected clusters!")
}
ccmBaseDir := exutil.FixturePath("testdata", "clusterinfrastructure", "ccm")
loadBalancer := filepath.Join(ccmBaseDir, "svc-loadbalancer.yaml")
loadBalancerService := loadBalancerServiceDescription{
template: loadBalancer,
name: "svc-loadbalancer-70626",
namespace: oc.Namespace(),
}
g.By("Create loadBalancerService")
defer loadBalancerService.deleteLoadBalancerService(oc)
loadBalancerService.createLoadBalancerService(oc)
g.By("Check External-IP assigned")
getLBSvcIP(oc, loadBalancerService)
}) | |||||
test case | openshift/openshift-tests-private | e6220007-457f-4480-8f52-25130d35bf94 | Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-PreChkUpgrade-High-72031-Instances with custom DHCP option set should not block upgrade - AWS [Disruptive] | ['"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/upgrade.go | g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-PreChkUpgrade-High-72031-Instances with custom DHCP option set should not block upgrade - AWS [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
g.By("Create a new dhcpOptions")
var newDhcpOptionsID, currentDhcpOptionsID string
clusterinfra.GetAwsCredentialFromCluster(oc)
awsClient := exutil.InitAwsSession()
newDhcpOptionsID, err := awsClient.CreateDhcpOptionsWithDomainName("example72031.com")
if err != nil {
g.Skip("The credential is insufficient to perform create dhcpOptions operation, skip the cases!!")
}
g.By("Associate the VPC with the new dhcpOptionsId")
machineName := clusterinfra.ListMasterMachineNames(oc)[0]
instanceID, err := awsClient.GetAwsInstanceID(machineName)
o.Expect(err).NotTo(o.HaveOccurred())
vpcID, err := awsClient.GetAwsInstanceVPCId(instanceID)
o.Expect(err).NotTo(o.HaveOccurred())
currentDhcpOptionsID, err = awsClient.GetDhcpOptionsIDOfVpc(vpcID)
o.Expect(err).NotTo(o.HaveOccurred())
err = awsClient.AssociateDhcpOptions(vpcID, newDhcpOptionsID)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create a new machineset")
infrastructureName := clusterinfra.GetInfrastructureName(oc)
machinesetName := infrastructureName + "-72031"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 1}
ms.CreateMachineSet(oc)
//Add a special tag to the original dhcp options set so that we can find it in the PstChkUpgrade case
err = awsClient.CreateTag(currentDhcpOptionsID, infrastructureName, "previousdhcp72031")
o.Expect(err).NotTo(o.HaveOccurred())
machineNameOfMachineSet := clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)[0]
nodeName := clusterinfra.GetNodeNameFromMachine(oc, machineNameOfMachineSet)
readyStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeName, "-o=jsonpath={.status.conditions[?(@.type==\"Ready\")].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(readyStatus).Should(o.Equal("True"))
internalDNS, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, machineNameOfMachineSet, "-o=jsonpath={.status.addresses[?(@.type==\"InternalDNS\")].address}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(internalDNS, "example72031.com")).To(o.BeTrue())
}) | |||||
test case | openshift/openshift-tests-private | b81954a9-af41-47c3-b455-153c849598e0 | Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-PstChkUpgrade-High-72031-Instances with custom DHCP option set should not block upgrade - AWS [Disruptive] | ['"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/upgrade.go | g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-PstChkUpgrade-High-72031-Instances with custom DHCP option set should not block upgrade - AWS [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
infrastructureName := clusterinfra.GetInfrastructureName(oc)
machinesetName := infrastructureName + "-72031"
machineset, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, machinesetName, "-n", machineAPINamespace).Output()
if strings.Contains(machineset, "not found") {
g.Skip("The machineset " + machinesetName + " is not created before upgrade, skip this case!")
}
clusterinfra.GetAwsCredentialFromCluster(oc)
awsClient := exutil.InitAwsSession()
machineName := clusterinfra.ListMasterMachineNames(oc)[0]
instanceID, err := awsClient.GetAwsInstanceID(machineName)
o.Expect(err).NotTo(o.HaveOccurred())
vpcID, err := awsClient.GetAwsInstanceVPCId(instanceID)
o.Expect(err).NotTo(o.HaveOccurred())
newDhcpOptionsID, err := awsClient.GetDhcpOptionsIDOfVpc(vpcID)
o.Expect(err).NotTo(o.HaveOccurred())
previousDhcpOptionsID, err := awsClient.GetDhcpOptionsIDFromTag(infrastructureName, "previousdhcp72031")
e2e.Logf("previousDhcpOptionsID:" + strings.Join(previousDhcpOptionsID, "*"))
o.Expect(err).NotTo(o.HaveOccurred())
defer func(dhcpOptionsID []string) {
if len(dhcpOptionsID) > 0 {
e2e.Logf("previousDhcpOptionsID[0]:" + dhcpOptionsID[0])
} else {
e2e.Fail("there is no previousDhcpOptionsID")
}
err := awsClient.DeleteTag(dhcpOptionsID[0], infrastructureName, "previousdhcp72031")
o.Expect(err).NotTo(o.HaveOccurred())
err = awsClient.AssociateDhcpOptions(vpcID, dhcpOptionsID[0])
o.Expect(err).NotTo(o.HaveOccurred())
err = awsClient.DeleteDhcpOptions(newDhcpOptionsID)
o.Expect(err).NotTo(o.HaveOccurred())
}(previousDhcpOptionsID)
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
g.By("Check machine is still Running and node is still Ready")
phase, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, "-n", machineAPINamespace, "-l", "machine.openshift.io/cluster-api-machineset="+machinesetName, "-o=jsonpath={.items[0].status.phase}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(phase).Should(o.Equal("Running"))
nodeName := clusterinfra.GetNodeNameFromMachine(oc, clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)[0])
readyStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeName, "-o=jsonpath={.status.conditions[?(@.type==\"Ready\")].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(readyStatus).Should(o.Equal("True"))
}) | |||||
test | openshift/openshift-tests-private | fcecfbe8-509d-4abb-b03e-42784035f2f7 | capi_machines | import (
"path/filepath"
"strconv"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
) | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi_machines.go | package clusterinfrastructure
import (
"path/filepath"
"strconv"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
)
var _ = g.Describe("[sig-cluster-lifecycle] Cluster_Infrastructure CAPI", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("capi-machines", exutil.KubeConfigPath())
iaasPlatform clusterinfra.PlatformType
clusterID string
region string
profile string
instanceType string
zone string
ami string
subnetName string
subnetID string
sgName string
image string
machineType string
subnetwork string
serviceAccount string
capiBaseDir string
clusterTemplate string
awsMachineTemplateTemplate string
gcpMachineTemplateTemplate string
gcpMachineTemplateTemplatepdbal string
capiMachinesetAWSTemplate string
capiMachinesetgcpTemplate string
capiMachinesetvsphereTemplate string
vsphereMachineTemplateTemplate string
vsphere_server string
diskGiB string
int_diskGiB int
datacenter string
datastore string
machineTemplate string
folder string
resourcePool string
numCPUs string
int_numCPUs int
networkname string
memoryMiB string
int_memoryMiB int
err error
cluster clusterDescription
awsMachineTemplate awsMachineTemplateDescription
gcpMachineTemplate gcpMachineTemplateDescription
gcpMachineTemplatepdbal gcpMachineTemplateDescription
capiMachineSetAWS capiMachineSetAWSDescription
capiMachineSetgcp capiMachineSetgcpDescription
clusterNotInCapi clusterDescriptionNotInCapi
vsphereMachineTemplate vsphereMachineTemplateDescription
capiMachineSetvsphere capiMachineSetvsphereDescription
)
g.BeforeEach(func() {
exutil.SkipForSNOCluster(oc)
clusterinfra.SkipConditionally(oc)
iaasPlatform = clusterinfra.CheckPlatform(oc)
switch iaasPlatform {
case clusterinfra.AWS:
region, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.aws.region}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
randomMachinesetName := clusterinfra.GetRandomMachineSetName(oc)
profile, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.iamInstanceProfile.id}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
instanceType, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.instanceType}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
zone, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.placement.availabilityZone}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
ami, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.ami.id}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
subnetName, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.subnet.filters[0].values[0]}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
subnetID, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.subnet.id}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
sgName, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.securityGroups[0].filters[0].values[0]}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
case clusterinfra.GCP:
region, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.gcp.region}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
randomMachinesetName := clusterinfra.GetRandomMachineSetName(oc)
zone, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.zone}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
image, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.disks[0].image}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
machineType, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.machineType}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
subnetwork, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.networkInterfaces[0].subnetwork}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
serviceAccount, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.serviceAccounts[0].email}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
case clusterinfra.VSphere:
vsphere_server, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.spec.platformSpec.vsphere.vcenters[0].server}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
randomMachinesetName := clusterinfra.GetRandomMachineSetName(oc)
diskGiB, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.diskGiB}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
int_diskGiB, err = strconv.Atoi(diskGiB)
o.Expect(err).NotTo(o.HaveOccurred())
datacenter, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.workspace.datacenter}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
machineTemplate, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.template}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
datastore, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.workspace.datastore}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
folder, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.workspace.folder}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
resourcePool, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.workspace.resourcePool}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
numCPUs, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.numCPUs}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
int_numCPUs, err = strconv.Atoi(numCPUs)
o.Expect(err).NotTo(o.HaveOccurred())
networkname, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.network.devices[0].networkName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
memoryMiB, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, randomMachinesetName, "-n", machineAPINamespace, "-o=jsonpath={.spec.template.spec.providerSpec.value.memoryMiB}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
int_memoryMiB, err = strconv.Atoi(memoryMiB)
o.Expect(err).NotTo(o.HaveOccurred())
default:
g.Skip("IAAS platform is " + iaasPlatform.String() + " which is NOT supported cluster api ...")
}
clusterID = clusterinfra.GetInfrastructureName(oc)
capiBaseDir = exutil.FixturePath("testdata", "clusterinfrastructure", "capi")
clusterTemplate = filepath.Join(capiBaseDir, "cluster.yaml")
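// Choose the AWS machine template fixture by how the machineset references its subnet: a non-empty subnetName means a name-filter lookup, otherwise fall back to the ID-based template.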
if subnetName != "" {
awsMachineTemplateTemplate = filepath.Join(capiBaseDir, "machinetemplate-aws.yaml")
} else {
awsMachineTemplateTemplate = filepath.Join(capiBaseDir, "machinetemplate-aws-id.yaml")
}
gcpMachineTemplateTemplate = filepath.Join(capiBaseDir, "machinetemplate-gcp.yaml")
gcpMachineTemplateTemplatepdbal = filepath.Join(capiBaseDir, "machinetemplate-gcp-pd-bal.yaml")
capiMachinesetAWSTemplate = filepath.Join(capiBaseDir, "machinesetaws.yaml")
capiMachinesetgcpTemplate = filepath.Join(capiBaseDir, "machinesetgcp.yaml")
vsphereMachineTemplateTemplate = filepath.Join(capiBaseDir, "machinetemplate-vsphere.yaml")
capiMachinesetvsphereTemplate = filepath.Join(capiBaseDir, "machinesetvsphere.yaml")
cluster = clusterDescription{
name: clusterID,
template: clusterTemplate,
}
clusterNotInCapi = clusterDescriptionNotInCapi{
name: clusterID,
namespace: "openshift-machine-api",
template: clusterTemplate,
}
awsMachineTemplate = awsMachineTemplateDescription{
name: "aws-machinetemplate",
profile: profile,
instanceType: instanceType,
zone: zone,
ami: ami,
subnetName: subnetName,
sgName: sgName,
subnetID: subnetID,
template: awsMachineTemplateTemplate,
}
gcpMachineTemplate = gcpMachineTemplateDescription{
name: "gcp-machinetemplate",
region: region,
image: image,
machineType: machineType,
subnetwork: subnetwork,
serviceAccount: serviceAccount,
clusterID: clusterID,
template: gcpMachineTemplateTemplate,
}
//gcpMachineTemplateTemplate-pd-bal
gcpMachineTemplatepdbal = gcpMachineTemplateDescription{
name: "gcp-machinetemplate",
region: region,
image: image,
machineType: machineType,
subnetwork: subnetwork,
serviceAccount: serviceAccount,
clusterID: clusterID,
template: gcpMachineTemplateTemplatepdbal,
}
capiMachineSetAWS = capiMachineSetAWSDescription{
name: "capi-machineset",
clusterName: clusterID,
template: capiMachinesetAWSTemplate,
replicas: 1,
}
capiMachineSetgcp = capiMachineSetgcpDescription{
name: "capi-machineset-gcp",
clusterName: clusterID,
template: capiMachinesetgcpTemplate,
failureDomain: zone,
replicas: 1,
}
capiMachineSetvsphere = capiMachineSetvsphereDescription{
name: "capi-machineset-vsphere",
clusterName: clusterID,
template: capiMachinesetvsphereTemplate,
dataSecretName: "worker-user-data",
replicas: 1,
}
vsphereMachineTemplate = vsphereMachineTemplateDescription{
kind: "VSphereMachineTemplate",
name: clusterID,
namespace: "openshift-cluster-api",
server: vsphere_server,
diskGiB: int_diskGiB,
datacenter: datacenter,
machineTemplate: machineTemplate,
datastore: datastore,
folder: folder,
resourcePool: resourcePool,
numCPUs: int_numCPUs,
memoryMiB: int_memoryMiB,
dhcp: true,
networkName: networkname,
template: vsphereMachineTemplateTemplate,
}
switch iaasPlatform {
case clusterinfra.AWS:
cluster.kind = "AWSCluster"
clusterNotInCapi.kind = "AWSCluster"
capiMachineSetAWS.kind = "AWSMachineTemplate"
capiMachineSetAWS.machineTemplateName = awsMachineTemplate.name
case clusterinfra.GCP:
cluster.kind = "GCPCluster"
clusterNotInCapi.kind = "GCPCluster"
capiMachineSetgcp.kind = "GCPMachineTemplate"
capiMachineSetgcp.machineTemplateName = gcpMachineTemplate.name
capiMachineSetgcp.failureDomain = zone
case clusterinfra.VSphere:
cluster.kind = "VSphereCluster"
capiMachineSetvsphere.kind = "VSphereMachineTemplate"
capiMachineSetvsphere.machineTemplateName = vsphereMachineTemplate.name
capiMachineSetvsphere.dataSecretName = ""
default:
g.Skip("IAAS platform is " + iaasPlatform.String() + " which is NOT supported cluster api ...")
}
})
g.It("Author:zhsun-NonHyperShiftHOST-NonPreRelease-Longduration-High-51071-Create machineset with CAPI on aws [Disruptive][Slow]", func() {
g.By("Check if cluster api on this platform is supported")
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
skipForCAPINotExist(oc)
g.By("Create capi machineset")
/*create cluster no longer necessary - OCPCLOUD-2202
cluster.createCluster(oc)*/
defer awsMachineTemplate.deleteAWSMachineTemplate(oc)
awsMachineTemplate.createAWSMachineTemplate(oc)
capiMachineSetAWS.name = "capi-machineset-51071"
defer waitForCapiMachinesDisapper(oc, capiMachineSetAWS.name)
defer capiMachineSetAWS.deleteCapiMachineSet(oc)
capiMachineSetAWS.createCapiMachineSet(oc)
machineName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(capiMachine, "-o=jsonpath={.items[*].metadata.name}", "-n", "openshift-cluster-api").Output()
o.Expect(err).NotTo(o.HaveOccurred())
_, err = matchProviderIDWithNode(oc, capiMachine, machineName, "openshift-cluster-api")
o.Expect(err).NotTo(o.HaveOccurred())
})
g.It("Author:zhsun-NonHyperShiftHOST-NonPreRelease-Longduration-High-53100-Medium-74794-Create machineset with CAPI on gcp [Disruptive][Slow]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.GCP)
skipForCAPINotExist(oc)
g.By("Create capi machineset")
/*create cluster no longer necessary - OCPCLOUD-2202
cluster.createCluster(oc)*/
// rootDeviceTypes included to cover multiple cases
rootDeviceTypes := map[string]string{
// "pd-ssd": "53100", This is now covered in cluster-actuator-pkg-tests pd-balanced is not possible due to change need to a vendor file"
// We can leave rootDeviceTypes as map to accomodate any type like pd-balanced later
"pd-balanced": "74794",
}
for rootDeviceType, machineNameSuffix := range rootDeviceTypes {
g.By("Patching GCPMachineTemplate with rootDeviceType: " + rootDeviceType)
if rootDeviceType == "pd-ssd" {
gcpMachineTemplate.createGCPMachineTemplate(oc)
} else if rootDeviceType == "pd-balanced" {
gcpMachineTemplatepdbal.createGCPMachineTemplatePdBal(oc)
}
capiMachineSetgcp.name = "capi-machineset-" + machineNameSuffix
defer waitForCapiMachinesDisappergcp(oc, capiMachineSetgcp.name)
defer capiMachineSetgcp.deleteCapiMachineSetgcp(oc)
capiMachineSetgcp.createCapiMachineSetgcp(oc)
// Retrieve the machine name and validate it with the node
machineName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(capiMachine, "-o=jsonpath={.items[*].metadata.name}", "-n", "openshift-cluster-api").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// Match the provider ID with the node for verification
_, err = matchProviderIDWithNode(oc, capiMachine, machineName, "openshift-cluster-api")
o.Expect(err).NotTo(o.HaveOccurred())
// gcpmachinetemplate is immutable, so delete and recreate it for each rootDeviceType
capiMachineSetgcp.deleteCapiMachineSetgcp(oc)
waitForCapiMachinesDisappergcp(oc, capiMachineSetgcp.name)
if rootDeviceType == "pd-ssd" {
gcpMachineTemplate.deleteGCPMachineTemplate(oc)
} else if rootDeviceType == "pd-balanced" {
gcpMachineTemplatepdbal.deleteGCPMachineTemplate(oc)
}
}
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Medium-55205-Webhook validations for CAPI [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.GCP)
skipForCAPINotExist(oc)
g.By("Shouldn't allow to create/update cluster with invalid kind")
clusters, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cluster", "-n", clusterAPINamespace, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if len(clusters) == 0 {
cluster.createCluster(oc)
}
clusterKind, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args("cluster", cluster.name, "-n", clusterAPINamespace, "-p", `{"spec":{"infrastructureRef":{"kind":"invalid"}}}`, "--type=merge").Output()
o.Expect(clusterKind).To(o.ContainSubstring("invalid"))
g.By("Shouldn't allow to delete cluster")
clusterDelete, _ := oc.AsAdmin().WithoutNamespace().Run("delete").Args("cluster", cluster.name, "-n", clusterAPINamespace).Output()
o.Expect(clusterDelete).To(o.ContainSubstring("deletion of cluster is not allowed"))
})
// author: [email protected]
g.It("Author:miyadav-NonHyperShiftHOST-High-69188-cluster object can be deleted in non-cluster-api namespace [Disruptive]", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.GCP)
skipForCAPINotExist(oc)
g.By("Create cluster object in namespace other than openshift-cluster-api")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("cluster", clusterNotInCapi.name, "-n", clusterNotInCapi.namespace).Execute()
clusterNotInCapi.createClusterNotInCapiNamespace(oc)
g.By("Deleting cluster object in namespace other than openshift-cluster-api, should be successful")
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("cluster", clusterNotInCapi.name, "-n", clusterNotInCapi.namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-62928-Enable IMDSv2 on existing worker machines via machine set [Disruptive][Slow]", func() {
g.By("Check if cluster api on this platform is supported")
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
skipForCAPINotExist(oc)
g.By("Create cluster, awscluster, awsmachinetemplate")
/*create cluster no longer necessary - OCPCLOUD-2202
cluster.createCluster(oc)
//OCPCLOUD-2204
defer awscluster.deleteAWSCluster(oc)
awscluster.createAWSCluster(oc)*/
defer awsMachineTemplate.deleteAWSMachineTemplate(oc)
awsMachineTemplate.createAWSMachineTemplate(oc)
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("awsmachinetemplate", capiMachineSetAWS.machineTemplateName, "-n", clusterAPINamespace, "-p", `{"spec":{"template":{"spec":{"instanceMetadataOptions":{"httpEndpoint":"enabled","httpPutResponseHopLimit":1,"httpTokens":"required","instanceMetadataTags":"disabled"}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Check machineTemplate with httpTokens: required")
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("awsmachinetemplate", capiMachineSetAWS.machineTemplateName, "-n", clusterAPINamespace, "-o=jsonpath={.spec.template.spec.instanceMetadataOptions.httpTokens}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).Should(o.Equal("required"))
g.By("Create capi machineset with IMDSv2")
capiMachineSetAWS.name = "capi-machineset-62928"
defer waitForCapiMachinesDisapper(oc, capiMachineSetAWS.name)
defer capiMachineSetAWS.deleteCapiMachineSet(oc)
capiMachineSetAWS.createCapiMachineSet(oc)
})
// author: [email protected]
g.It("Author:miyadav-NonHyperShiftHOST-NonPreRelease-Longduration-High-72433-Create machineset with CAPI on vsphere [Disruptive][Slow]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.VSphere)
skipForCAPINotExist(oc)
g.By("Create capi machineset")
/*create cluster no longer necessary - OCPCLOUD-2202
cluster.createCluster(oc)*/
defer vsphereMachineTemplate.deletevsphereMachineTemplate(oc)
vsphereMachineTemplate.createvsphereMachineTemplate(oc)
capiMachineSetvsphere.name = "capi-machineset-72433"
capiMachineSetvsphere.createCapiMachineSetvsphere(oc)
defer waitForCapiMachinesDisapper(oc, capiMachineSetvsphere.name)
defer capiMachineSetvsphere.deleteCapiMachineSetvsphere(oc)
machineName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(capiMachine, "-o=jsonpath={.items[*].metadata.name}", "-n", "openshift-cluster-api").Output()
o.Expect(err).NotTo(o.HaveOccurred())
_, err = matchProviderIDWithNode(oc, capiMachine, machineName, "openshift-cluster-api")
o.Expect(err).NotTo(o.HaveOccurred())
})
g.It("Author:huliu-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-74803-[CAPI] Support AWS Placement Group Partition Number [Disruptive][Slow]", func() {
g.By("Check if cluster api on this platform is supported")
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
skipForCAPINotExist(oc)
clusterinfra.GetAwsCredentialFromCluster(oc)
awsClient := exutil.InitAwsSession()
_, err := awsClient.GetPlacementGroupByName("pgpartition3")
if err != nil {
g.Skip("There is no this placement group for testing, skip the cases!!")
}
g.By("Create capi machineset")
/*create cluster no longer necessary - OCPCLOUD-2202
cluster.createCluster(oc)*/
awsMachineTemplate.placementGroupName = "pgpartition3"
awsMachineTemplate.placementGroupPartition = 3
defer awsMachineTemplate.deleteAWSMachineTemplate(oc)
awsMachineTemplate.createAWSMachineTemplate(oc)
g.By("Check machineTemplate with placementGroupName: pgpartition3 and placementGroupPartition: 3")
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("awsmachinetemplate", capiMachineSetAWS.machineTemplateName, "-n", clusterAPINamespace, "-o=jsonpath={.spec.template.spec.placementGroupName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).Should(o.Equal("pgpartition3"))
out, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("awsmachinetemplate", capiMachineSetAWS.machineTemplateName, "-n", clusterAPINamespace, "-o=jsonpath={.spec.template.spec.placementGroupPartition}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).Should(o.Equal("3"))
capiMachineSetAWS.name = "capi-machineset-74803"
defer waitForCapiMachinesDisapper(oc, capiMachineSetAWS.name)
defer capiMachineSetAWS.deleteCapiMachineSet(oc)
capiMachineSetAWS.createCapiMachineSet(oc)
})
g.It("Author:huliu-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-76088-[CAPI] New machine can join cluster when VPC has custom DHCP option set [Disruptive][Slow]", func() {
g.By("Check if cluster api on this platform is supported")
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
skipForCAPINotExist(oc)
g.By("Create a new dhcpOptions")
var newDhcpOptionsID, currentDhcpOptionsID string
clusterinfra.GetAwsCredentialFromCluster(oc)
awsClient := exutil.InitAwsSession()
newDhcpOptionsID, err := awsClient.CreateDhcpOptionsWithDomainName("capi76088-CAPI.com.")
if err != nil {
g.Skip("The credential is insufficient to perform create dhcpOptions operation, skip the cases!!")
}
defer func() {
err := awsClient.DeleteDhcpOptions(newDhcpOptionsID)
o.Expect(err).NotTo(o.HaveOccurred())
}()
g.By("Associate the VPC with the new dhcpOptionsId")
machineName := clusterinfra.ListMasterMachineNames(oc)[0]
instanceID, err := awsClient.GetAwsInstanceID(machineName)
o.Expect(err).NotTo(o.HaveOccurred())
vpcID, err := awsClient.GetAwsInstanceVPCId(instanceID)
o.Expect(err).NotTo(o.HaveOccurred())
currentDhcpOptionsID, err = awsClient.GetDhcpOptionsIDOfVpc(vpcID)
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
err := awsClient.AssociateDhcpOptions(vpcID, currentDhcpOptionsID)
o.Expect(err).NotTo(o.HaveOccurred())
}()
err = awsClient.AssociateDhcpOptions(vpcID, newDhcpOptionsID)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create capi machineset")
/*create cluster no longer necessary - OCPCLOUD-2202
cluster.createCluster(oc)*/
defer awsMachineTemplate.deleteAWSMachineTemplate(oc)
awsMachineTemplate.createAWSMachineTemplate(oc)
capiMachineSetAWS.name = "capi-machineset-76088"
defer waitForCapiMachinesDisapper(oc, capiMachineSetAWS.name)
defer capiMachineSetAWS.deleteCapiMachineSet(oc)
capiMachineSetAWS.createCapiMachineSet(oc)
})
})
| package clusterinfrastructure | ||||
test case | openshift/openshift-tests-private | 41042280-3440-4a5a-a2c3-86b32cf190c8 | Author:zhsun-NonHyperShiftHOST-NonPreRelease-Longduration-High-51071-Create machineset with CAPI on aws [Disruptive][Slow] | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi_machines.go | g.It("Author:zhsun-NonHyperShiftHOST-NonPreRelease-Longduration-High-51071-Create machineset with CAPI on aws [Disruptive][Slow]", func() {
g.By("Check if cluster api on this platform is supported")
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
skipForCAPINotExist(oc)
g.By("Create capi machineset")
/*create cluster no longer necessary - OCPCLOUD-2202
cluster.createCluster(oc)*/
defer awsMachineTemplate.deleteAWSMachineTemplate(oc)
awsMachineTemplate.createAWSMachineTemplate(oc)
capiMachineSetAWS.name = "capi-machineset-51071"
defer waitForCapiMachinesDisapper(oc, capiMachineSetAWS.name)
defer capiMachineSetAWS.deleteCapiMachineSet(oc)
capiMachineSetAWS.createCapiMachineSet(oc)
machineName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(capiMachine, "-o=jsonpath={.items[*].metadata.name}", "-n", "openshift-cluster-api").Output()
o.Expect(err).NotTo(o.HaveOccurred())
_, err = matchProviderIDWithNode(oc, capiMachine, machineName, "openshift-cluster-api")
o.Expect(err).NotTo(o.HaveOccurred())
}) | |||||
test case | openshift/openshift-tests-private | f0fef40e-1cee-459a-90b9-0fb75ee1a69a | Author:zhsun-NonHyperShiftHOST-NonPreRelease-Longduration-High-53100-Medium-74794-Create machineset with CAPI on gcp [Disruptive][Slow] | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi_machines.go | g.It("Author:zhsun-NonHyperShiftHOST-NonPreRelease-Longduration-High-53100-Medium-74794-Create machineset with CAPI on gcp [Disruptive][Slow]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.GCP)
skipForCAPINotExist(oc)
g.By("Create capi machineset")
/*create cluster no longer necessary - OCPCLOUD-2202
cluster.createCluster(oc)*/
// rootDeviceTypes included to cover multiple cases
rootDeviceTypes := map[string]string{
// "pd-ssd": "53100", This is now covered in cluster-actuator-pkg-tests pd-balanced is not possible due to change need to a vendor file"
// We can leave rootDeviceTypes as map to accomodate any type like pd-balanced later
"pd-balanced": "74794",
}
for rootDeviceType, machineNameSuffix := range rootDeviceTypes {
g.By("Patching GCPMachineTemplate with rootDeviceType: " + rootDeviceType)
if rootDeviceType == "pd-ssd" {
gcpMachineTemplate.createGCPMachineTemplate(oc)
} else if rootDeviceType == "pd-balanced" {
gcpMachineTemplatepdbal.createGCPMachineTemplatePdBal(oc)
}
capiMachineSetgcp.name = "capi-machineset-" + machineNameSuffix
defer waitForCapiMachinesDisappergcp(oc, capiMachineSetgcp.name)
defer capiMachineSetgcp.deleteCapiMachineSetgcp(oc)
capiMachineSetgcp.createCapiMachineSetgcp(oc)
// Retrieve the machine name and validate it with the node
machineName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(capiMachine, "-o=jsonpath={.items[*].metadata.name}", "-n", "openshift-cluster-api").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// Match the provider ID with the node for verification
_, err = matchProviderIDWithNode(oc, capiMachine, machineName, "openshift-cluster-api")
o.Expect(err).NotTo(o.HaveOccurred())
// gcpmachinetemplate is immutable, so delete and recreate it for each rootDeviceType
capiMachineSetgcp.deleteCapiMachineSetgcp(oc)
waitForCapiMachinesDisappergcp(oc, capiMachineSetgcp.name)
if rootDeviceType == "pd-ssd" {
gcpMachineTemplate.deleteGCPMachineTemplate(oc)
} else if rootDeviceType == "pd-balanced" {
gcpMachineTemplatepdbal.deleteGCPMachineTemplate(oc)
}
}
}) | |||||
test case | openshift/openshift-tests-private | 855a7c3c-d9c6-4c5a-acd6-b37e37f2f365 | Author:zhsun-NonHyperShiftHOST-Medium-55205-Webhook validations for CAPI [Disruptive] | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi_machines.go | g.It("Author:zhsun-NonHyperShiftHOST-Medium-55205-Webhook validations for CAPI [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.GCP)
skipForCAPINotExist(oc)
g.By("Shouldn't allow to create/update cluster with invalid kind")
clusters, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cluster", "-n", clusterAPINamespace, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if len(clusters) == 0 {
cluster.createCluster(oc)
}
clusterKind, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args("cluster", cluster.name, "-n", clusterAPINamespace, "-p", `{"spec":{"infrastructureRef":{"kind":"invalid"}}}`, "--type=merge").Output()
o.Expect(clusterKind).To(o.ContainSubstring("invalid"))
g.By("Shouldn't allow to delete cluster")
clusterDelete, _ := oc.AsAdmin().WithoutNamespace().Run("delete").Args("cluster", cluster.name, "-n", clusterAPINamespace).Output()
o.Expect(clusterDelete).To(o.ContainSubstring("deletion of cluster is not allowed"))
}) | |||||
test case | openshift/openshift-tests-private | dd01b3a2-b3b2-4685-a32c-32fd8b6679a8 | Author:miyadav-NonHyperShiftHOST-High-69188-cluster object can be deleted in non-cluster-api namespace [Disruptive] | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi_machines.go | g.It("Author:miyadav-NonHyperShiftHOST-High-69188-cluster object can be deleted in non-cluster-api namespace [Disruptive]", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.GCP)
skipForCAPINotExist(oc)
g.By("Create cluster object in namespace other than openshift-cluster-api")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("cluster", clusterNotInCapi.name, "-n", clusterNotInCapi.namespace).Execute()
clusterNotInCapi.createClusterNotInCapiNamespace(oc)
g.By("Deleting cluster object in namespace other than openshift-cluster-api, should be successful")
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("cluster", clusterNotInCapi.name, "-n", clusterNotInCapi.namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}) | |||||
test case | openshift/openshift-tests-private | 273eba6e-9d29-4c98-bfcc-55f8792eb8ca | Author:zhsun-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-62928-Enable IMDSv2 on existing worker machines via machine set [Disruptive][Slow] | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi_machines.go | g.It("Author:zhsun-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-62928-Enable IMDSv2 on existing worker machines via machine set [Disruptive][Slow]", func() {
g.By("Check if cluster api on this platform is supported")
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
skipForCAPINotExist(oc)
g.By("Create cluster, awscluster, awsmachinetemplate")
/*create cluster no longer necessary - OCPCLOUD-2202
cluster.createCluster(oc)
//OCPCLOUD-2204
defer awscluster.deleteAWSCluster(oc)
awscluster.createAWSCluster(oc)*/
defer awsMachineTemplate.deleteAWSMachineTemplate(oc)
awsMachineTemplate.createAWSMachineTemplate(oc)
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("awsmachinetemplate", capiMachineSetAWS.machineTemplateName, "-n", clusterAPINamespace, "-p", `{"spec":{"template":{"spec":{"instanceMetadataOptions":{"httpEndpoint":"enabled","httpPutResponseHopLimit":1,"httpTokens":"required","instanceMetadataTags":"disabled"}}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Check machineTemplate with httpTokens: required")
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("awsmachinetemplate", capiMachineSetAWS.machineTemplateName, "-n", clusterAPINamespace, "-o=jsonpath={.spec.template.spec.instanceMetadataOptions.httpTokens}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).Should(o.Equal("required"))
g.By("Create capi machineset with IMDSv2")
capiMachineSetAWS.name = "capi-machineset-62928"
defer waitForCapiMachinesDisapper(oc, capiMachineSetAWS.name)
defer capiMachineSetAWS.deleteCapiMachineSet(oc)
capiMachineSetAWS.createCapiMachineSet(oc)
}) | |||||
test case | openshift/openshift-tests-private | 786def64-692d-454c-91a4-b0d38f289385 | Author:miyadav-NonHyperShiftHOST-NonPreRelease-Longduration-High-72433-Create machineset with CAPI on vsphere [Disruptive][Slow] | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi_machines.go | g.It("Author:miyadav-NonHyperShiftHOST-NonPreRelease-Longduration-High-72433-Create machineset with CAPI on vsphere [Disruptive][Slow]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.VSphere)
skipForCAPINotExist(oc)
g.By("Create capi machineset")
/*create cluster no longer necessary - OCPCLOUD-2202
cluster.createCluster(oc)*/
defer vsphereMachineTemplate.deletevsphereMachineTemplate(oc)
vsphereMachineTemplate.createvsphereMachineTemplate(oc)
capiMachineSetvsphere.name = "capi-machineset-72433"
capiMachineSetvsphere.createCapiMachineSetvsphere(oc)
defer waitForCapiMachinesDisapper(oc, capiMachineSetvsphere.name)
defer capiMachineSetvsphere.deleteCapiMachineSetvsphere(oc)
machineName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(capiMachine, "-o=jsonpath={.items[*].metadata.name}", "-n", "openshift-cluster-api").Output()
o.Expect(err).NotTo(o.HaveOccurred())
_, err = matchProviderIDWithNode(oc, capiMachine, machineName, "openshift-cluster-api")
o.Expect(err).NotTo(o.HaveOccurred())
}) | |||||
test case | openshift/openshift-tests-private | c397e290-61e3-4844-bac3-346672d438ea | Author:huliu-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-74803-[CAPI] Support AWS Placement Group Partition Number [Disruptive][Slow] | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi_machines.go | g.It("Author:huliu-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-74803-[CAPI] Support AWS Placement Group Partition Number [Disruptive][Slow]", func() {
g.By("Check if cluster api on this platform is supported")
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
skipForCAPINotExist(oc)
clusterinfra.GetAwsCredentialFromCluster(oc)
awsClient := exutil.InitAwsSession()
_, err := awsClient.GetPlacementGroupByName("pgpartition3")
if err != nil {
g.Skip("There is no this placement group for testing, skip the cases!!")
}
g.By("Create capi machineset")
/*create cluster no longer necessary - OCPCLOUD-2202
cluster.createCluster(oc)*/
awsMachineTemplate.placementGroupName = "pgpartition3"
awsMachineTemplate.placementGroupPartition = 3
defer awsMachineTemplate.deleteAWSMachineTemplate(oc)
awsMachineTemplate.createAWSMachineTemplate(oc)
g.By("Check machineTemplate with placementGroupName: pgpartition3 and placementGroupPartition: 3")
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("awsmachinetemplate", capiMachineSetAWS.machineTemplateName, "-n", clusterAPINamespace, "-o=jsonpath={.spec.template.spec.placementGroupName}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).Should(o.Equal("pgpartition3"))
out, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("awsmachinetemplate", capiMachineSetAWS.machineTemplateName, "-n", clusterAPINamespace, "-o=jsonpath={.spec.template.spec.placementGroupPartition}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).Should(o.Equal("3"))
capiMachineSetAWS.name = "capi-machineset-74803"
defer waitForCapiMachinesDisapper(oc, capiMachineSetAWS.name)
defer capiMachineSetAWS.deleteCapiMachineSet(oc)
capiMachineSetAWS.createCapiMachineSet(oc)
}) | |||||
test case | openshift/openshift-tests-private | a40209ad-574a-484e-8f11-53bd8d64c476 | Author:huliu-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-76088-[CAPI] New machine can join cluster when VPC has custom DHCP option set [Disruptive][Slow] | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi_machines.go | g.It("Author:huliu-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-76088-[CAPI] New machine can join cluster when VPC has custom DHCP option set [Disruptive][Slow]", func() {
g.By("Check if cluster api on this platform is supported")
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
skipForCAPINotExist(oc)
g.By("Create a new dhcpOptions")
var newDhcpOptionsID, currentDhcpOptionsID string
clusterinfra.GetAwsCredentialFromCluster(oc)
awsClient := exutil.InitAwsSession()
newDhcpOptionsID, err := awsClient.CreateDhcpOptionsWithDomainName("capi76088-CAPI.com.")
if err != nil {
g.Skip("The credential is insufficient to perform create dhcpOptions operation, skip the cases!!")
}
defer func() {
err := awsClient.DeleteDhcpOptions(newDhcpOptionsID)
o.Expect(err).NotTo(o.HaveOccurred())
}()
g.By("Associate the VPC with the new dhcpOptionsId")
machineName := clusterinfra.ListMasterMachineNames(oc)[0]
instanceID, err := awsClient.GetAwsInstanceID(machineName)
o.Expect(err).NotTo(o.HaveOccurred())
vpcID, err := awsClient.GetAwsInstanceVPCId(instanceID)
o.Expect(err).NotTo(o.HaveOccurred())
currentDhcpOptionsID, err = awsClient.GetDhcpOptionsIDOfVpc(vpcID)
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
err := awsClient.AssociateDhcpOptions(vpcID, currentDhcpOptionsID)
o.Expect(err).NotTo(o.HaveOccurred())
}()
err = awsClient.AssociateDhcpOptions(vpcID, newDhcpOptionsID)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create capi machineset")
/*create cluster no longer necessary - OCPCLOUD-2202
cluster.createCluster(oc)*/
defer awsMachineTemplate.deleteAWSMachineTemplate(oc)
awsMachineTemplate.createAWSMachineTemplate(oc)
capiMachineSetAWS.name = "capi-machineset-76088"
defer waitForCapiMachinesDisapper(oc, capiMachineSetAWS.name)
defer capiMachineSetAWS.deleteCapiMachineSet(oc)
capiMachineSetAWS.createCapiMachineSet(oc)
}) | |||||
file | openshift/openshift-tests-private | f85352a8-ee33-4773-a276-5572e75e0ea6 | capi_utils | import (
"fmt"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi_utils.go | package clusterinfrastructure
import (
"fmt"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
type clusterDescription struct {
name string
namespace string
kind string
template string
}
type clusterDescriptionNotInCapi struct {
name string
namespace string
kind string
template string
}
type awsMachineTemplateDescription struct {
name string
namespace string
profile string
instanceType string
zone string
ami string
subnetName string
subnetID string
sgName string
template string
placementGroupName string
placementGroupPartition int
}
type gcpMachineTemplateDescription struct {
name string
namespace string
region string
image string
machineType string
clusterID string
subnetwork string
serviceAccount string
template string
}
type capiMachineSetAWSDescription struct {
name string
namespace string
clusterName string
kind string
replicas int
machineTemplateName string
template string
}
type capiMachineSetgcpDescription struct {
name string
namespace string
clusterName string
kind string
replicas int
machineTemplateName string
template string
failureDomain string
}
type capiMachineSetvsphereDescription struct {
name string
namespace string
clusterName string
kind string
replicas int
machineTemplateName string
template string
dataSecretName string
}
type vsphereMachineTemplateDescription struct {
kind string
name string
namespace string
server string
diskGiB int
datacenter string
datastore string
folder string
resourcePool string
numCPUs int
memoryMiB int
dhcp bool
networkName string
template string
cloneMode string
machineTemplate string
}
// skipForCAPINotExist skips the test if capi doesn't exist
func skipForCAPINotExist(oc *exutil.CLI) {
capi, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deploy", "-n", clusterAPINamespace, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// err is already asserted nil above, so only the empty-output case can reach this point
if len(capi) == 0 {
g.Skip("Skip because cluster api is not deployed!")
}
}
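// A minimal sketch in the same spirit, illustrative only and not used by the
// tests above: skip when one specific CRD is absent instead of when the whole
// cluster api deployment is missing. The crdName parameter is an assumption,
// not something the existing tests pass.
func skipForCRDNotExist(oc *exutil.CLI, crdName string) {
// --ignore-not-found makes `oc get` return empty output instead of an error
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("crd", crdName, "--ignore-not-found").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if len(out) == 0 {
g.Skip("Skip because CRD " + crdName + " is not installed!")
}
}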
func (cluster *clusterDescription) createCluster(oc *exutil.CLI) {
e2e.Logf("Creating cluster ...")
err := applyResourceFromTemplate(oc, "-f", cluster.template, "-p", "NAME="+cluster.name, "NAMESPACE="+clusterAPINamespace, "KIND="+cluster.kind)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (clusterNotInCapi *clusterDescriptionNotInCapi) createClusterNotInCapiNamespace(oc *exutil.CLI) {
e2e.Logf("Creating cluster in namepsace not openshift-cluster-api ...")
err := applyResourceFromTemplate(oc, "-f", clusterNotInCapi.template, "-p", "NAME="+clusterNotInCapi.name, "NAMESPACE="+clusterNotInCapi.namespace, "KIND="+clusterNotInCapi.kind)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (awsMachineTemplate *awsMachineTemplateDescription) createAWSMachineTemplate(oc *exutil.CLI) {
e2e.Logf("Creating awsMachineTemplate ...")
if awsMachineTemplate.placementGroupPartition != 0 {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", awsMachineTemplate.template, "-p", "NAME="+awsMachineTemplate.name, "NAMESPACE="+clusterAPINamespace, "PROFILE="+awsMachineTemplate.profile, "INSTANCETYPE="+awsMachineTemplate.instanceType, "ZONE="+awsMachineTemplate.zone, "AMI="+awsMachineTemplate.ami, "SUBNETNAME="+awsMachineTemplate.subnetName, "SUBNETID="+awsMachineTemplate.subnetID, "SGNAME="+awsMachineTemplate.sgName, "PLACEMENTGROUPNAME="+awsMachineTemplate.placementGroupName, "PLACEMENTGROUPPARTITION="+strconv.Itoa(awsMachineTemplate.placementGroupPartition))
o.Expect(err).NotTo(o.HaveOccurred())
} else {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", awsMachineTemplate.template, "-p", "NAME="+awsMachineTemplate.name, "NAMESPACE="+clusterAPINamespace, "PROFILE="+awsMachineTemplate.profile, "INSTANCETYPE="+awsMachineTemplate.instanceType, "ZONE="+awsMachineTemplate.zone, "AMI="+awsMachineTemplate.ami, "SUBNETNAME="+awsMachineTemplate.subnetName, "SUBNETID="+awsMachineTemplate.subnetID, "SGNAME="+awsMachineTemplate.sgName, "PLACEMENTGROUPNAME="+awsMachineTemplate.placementGroupName, "PLACEMENTGROUPPARTITION=null")
o.Expect(err).NotTo(o.HaveOccurred())
}
}
func (awsMachineTemplate *awsMachineTemplateDescription) deleteAWSMachineTemplate(oc *exutil.CLI) error {
e2e.Logf("Deleting awsMachineTemplate ...")
return oc.AsAdmin().WithoutNamespace().Run("delete").Args("awsmachinetemplate", awsMachineTemplate.name, "-n", clusterAPINamespace).Execute()
}
func (gcpMachineTemplate *gcpMachineTemplateDescription) createGCPMachineTemplate(oc *exutil.CLI) {
e2e.Logf("Creating gcpMachineTemplate ...")
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", gcpMachineTemplate.template, "-p", "NAME="+gcpMachineTemplate.name, "NAMESPACE="+clusterAPINamespace, "IMAGE="+gcpMachineTemplate.image, "REGION="+gcpMachineTemplate.region, "CLUSTERID="+gcpMachineTemplate.clusterID, "MACHINETYPE="+gcpMachineTemplate.machineType, "SUBNETWORK="+gcpMachineTemplate.subnetwork, "SERVICEACCOUNT="+gcpMachineTemplate.serviceAccount)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (gcpMachineTemplatepdbal *gcpMachineTemplateDescription) createGCPMachineTemplatePdBal(oc *exutil.CLI) {
e2e.Logf("Creating gcpMachineTemplate with pd balanced disk...")
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", gcpMachineTemplatepdbal.template, "-p", "NAME="+gcpMachineTemplatepdbal.name, "NAMESPACE="+clusterAPINamespace, "IMAGE="+gcpMachineTemplatepdbal.image, "REGION="+gcpMachineTemplatepdbal.region, "CLUSTERID="+gcpMachineTemplatepdbal.clusterID, "MACHINETYPE="+gcpMachineTemplatepdbal.machineType, "SUBNETWORK="+gcpMachineTemplatepdbal.subnetwork, "SERVICEACCOUNT="+gcpMachineTemplatepdbal.serviceAccount)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (gcpMachineTemplate *gcpMachineTemplateDescription) deleteGCPMachineTemplate(oc *exutil.CLI) error {
e2e.Logf("Deleting gcpMachineTemplate ...")
return oc.AsAdmin().WithoutNamespace().Run("delete").Args("gcpmachinetemplate", gcpMachineTemplate.name, "-n", clusterAPINamespace).Execute()
}
func (capiMachineSetAWS *capiMachineSetAWSDescription) createCapiMachineSet(oc *exutil.CLI) {
e2e.Logf("Creating awsMachineSet ...")
if err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", capiMachineSetAWS.template, "-p", "NAME="+capiMachineSetAWS.name, "NAMESPACE="+clusterAPINamespace, "CLUSTERNAME="+capiMachineSetAWS.clusterName, "MACHINETEMPLATENAME="+capiMachineSetAWS.machineTemplateName, "KIND="+capiMachineSetAWS.kind, "REPLICAS="+strconv.Itoa(capiMachineSetAWS.replicas)); err != nil {
capiMachineSetAWS.deleteCapiMachineSet(oc)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
waitForCapiMachinesRunning(oc, capiMachineSetAWS.replicas, capiMachineSetAWS.name)
}
}
func (capiMachineSetAWS *capiMachineSetAWSDescription) deleteCapiMachineSet(oc *exutil.CLI) error {
e2e.Logf("Deleting awsMachineSet ...")
return oc.AsAdmin().WithoutNamespace().Run("delete").Args(capiMachineset, capiMachineSetAWS.name, "-n", clusterAPINamespace).Execute()
}
func (capiMachineSetgcp *capiMachineSetgcpDescription) createCapiMachineSetgcp(oc *exutil.CLI) {
e2e.Logf("Creating gcpMachineSet ...")
if err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", capiMachineSetgcp.template, "-p", "NAME="+capiMachineSetgcp.name, "NAMESPACE="+clusterAPINamespace, "CLUSTERNAME="+capiMachineSetgcp.clusterName, "MACHINETEMPLATENAME="+capiMachineSetgcp.machineTemplateName, "KIND="+capiMachineSetgcp.kind, "FAILUREDOMAIN="+capiMachineSetgcp.failureDomain, "REPLICAS="+strconv.Itoa(capiMachineSetgcp.replicas)); err != nil {
capiMachineSetgcp.deleteCapiMachineSetgcp(oc)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
waitForCapiMachinesRunning(oc, capiMachineSetgcp.replicas, capiMachineSetgcp.name)
}
}
func (capiMachineSetgcp *capiMachineSetgcpDescription) deleteCapiMachineSetgcp(oc *exutil.CLI) error {
e2e.Logf("Deleting gcpMachineSet ...")
return oc.AsAdmin().WithoutNamespace().Run("delete").Args(capiMachineset, capiMachineSetgcp.name, "-n", clusterAPINamespace).Execute()
}
func (vsphereMachineTemplate *vsphereMachineTemplateDescription) createvsphereMachineTemplate(oc *exutil.CLI) {
e2e.Logf("Creating vsphereMachineTemplate ...")
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", vsphereMachineTemplate.template, "-p", "KIND="+vsphereMachineTemplate.kind, "NAME="+vsphereMachineTemplate.name, "NAMESPACE="+clusterAPINamespace, "VSPHERE_SERVER="+vsphereMachineTemplate.server, "DISKGIB="+strconv.Itoa(vsphereMachineTemplate.diskGiB), "CLONEMODE="+"linkedClone", "DATASTORE="+vsphereMachineTemplate.datastore, "DATACENTER="+vsphereMachineTemplate.datacenter, "FOLDER="+vsphereMachineTemplate.folder, "RESOURCEPOOL="+vsphereMachineTemplate.resourcePool, "NUMCPUS="+strconv.Itoa(vsphereMachineTemplate.numCPUs), "MEMORYMIB="+strconv.Itoa(vsphereMachineTemplate.memoryMiB), "NETWORKNAME="+vsphereMachineTemplate.networkName, "MACHINETEMPLATE="+vsphereMachineTemplate.machineTemplate)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (vsphereMachineTemplate *vsphereMachineTemplateDescription) deletevsphereMachineTemplate(oc *exutil.CLI) error {
e2e.Logf("Deleting vsphereMachineTemplate ...")
return oc.AsAdmin().WithoutNamespace().Run("delete").Args("VSpheremachinetemplate", vsphereMachineTemplate.name, "-n", clusterAPINamespace).Execute()
}
func (capiMachineSetvsphere *capiMachineSetvsphereDescription) createCapiMachineSetvsphere(oc *exutil.CLI) {
e2e.Logf("Creating vsphereMachineSet ...")
if err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", capiMachineSetvsphere.template, "-p", "NAME="+capiMachineSetvsphere.name, "NAMESPACE="+clusterAPINamespace, "CLUSTERNAME="+capiMachineSetvsphere.clusterName, "MACHINETEMPLATENAME="+capiMachineSetvsphere.machineTemplateName, "KIND="+capiMachineSetvsphere.kind, "DATASECRET="+capiMachineSetvsphere.dataSecretName, "REPLICAS="+strconv.Itoa(capiMachineSetvsphere.replicas)); err != nil {
capiMachineSetvsphere.deleteCapiMachineSetvsphere(oc)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
waitForCapiMachinesRunning(oc, capiMachineSetvsphere.replicas, capiMachineSetvsphere.name)
}
}
func (capiMachineSetvsphere *capiMachineSetvsphereDescription) deleteCapiMachineSetvsphere(oc *exutil.CLI) error {
e2e.Logf("Deleting vsphereMachineSet ...")
return oc.AsAdmin().WithoutNamespace().Run("delete").Args(capiMachineset, capiMachineSetvsphere.name, "-n", clusterAPINamespace).Execute()
}
// waitForCapiMachinesRunning checks that all machines in a MachineSet reach the Running state
func waitForCapiMachinesRunning(oc *exutil.CLI, machineNumber int, machineSetName string) {
e2e.Logf("Waiting for the machines Running ...")
if machineNumber >= 1 {
// Wait 180 seconds first, as it uses total 1200 seconds in wait.poll, it may not be enough for some platform(s)
time.Sleep(180 * time.Second)
}
pollErr := wait.Poll(60*time.Second, 1200*time.Second, func() (bool, error) {
msg, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(capiMachineset, machineSetName, "-o=jsonpath={.status.readyReplicas}", "-n", clusterAPINamespace).Output()
machinesRunning, _ := strconv.Atoi(msg)
if machinesRunning != machineNumber {
e2e.Logf("Expected %v machine are not Running yet and waiting up to 1 minutes ...", machineNumber)
return false, nil
}
e2e.Logf("Expected %v machines are Running", machineNumber)
return true, nil
})
exutil.AssertWaitPollNoErr(pollErr, fmt.Sprintf("Expected %v machines did not all reach Running within 20 minutes", machineNumber))
e2e.Logf("All machines are Running ...")
}
// waitForCapiMachinesDisapper checks that all machines in a MachineSet have disappeared
func waitForCapiMachinesDisapper(oc *exutil.CLI, machineSetName string) {
e2e.Logf("Waiting for the machines to disappear ...")
err := wait.Poll(60*time.Second, 1200*time.Second, func() (bool, error) {
machineNames, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(capiMachine, "-o=jsonpath={.items[*].metadata.name}", "-l", "cluster.x-k8s.io/set-name="+machineSetName, "-n", clusterAPINamespace).Output()
if machineNames != "" {
e2e.Logf(" Still have machines are not Disappered yet and waiting up to 1 minutes ...")
return false, nil
}
e2e.Logf("All machines are Disappered")
return true, nil
})
exutil.AssertWaitPollNoErr(err, "Wait machine disappear failed.")
}
// waitForCapiMachinesDisappergcp checks that all machines in a MachineSet have disappeared
func waitForCapiMachinesDisappergcp(oc *exutil.CLI, machineSetName string) {
e2e.Logf("Waiting for the machines to disappear ...")
err := wait.Poll(60*time.Second, 1200*time.Second, func() (bool, error) {
machineNames, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(capiMachine, "-o=jsonpath={.items[*].metadata.name}", "-n", clusterAPINamespace).Output()
if strings.Contains(machineNames, machineSetName) {
e2e.Logf(" Still have machines are not Disappered yet and waiting up to 1 minutes ...")
return false, nil
}
e2e.Logf("All machines are Disappered")
return true, nil
})
exutil.AssertWaitPollNoErr(err, "Wait machine disappear failed.")
}
func matchProviderIDWithNode(oc *exutil.CLI, resourceType, resourceName, namespace string) (bool, error) {
machineProviderID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(resourceType, resourceName, "-o=jsonpath={.spec.providerID}", "-n", namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(resourceType, resourceName, "-o=jsonpath={.status.nodeRef.name}", "-n", namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeProviderID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeName, "-o=jsonpath={.spec.providerID}", "-n", namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if machineProviderID != nodeProviderID {
e2e.Logf("Node and machine provider IDs do not match")
return false, err
}
e2e.Logf("Node and machine provider IDs match")
return true, nil
}
| package clusterinfrastructure | ||||
function | openshift/openshift-tests-private | d31973d7-8d8a-42e1-b8f0-2bfdc9259a56 | skipForCAPINotExist | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi_utils.go | func skipForCAPINotExist(oc *exutil.CLI) {
capi, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deploy", "-n", clusterAPINamespace, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if len(capi) == 0 {
g.Skip("Skip for cluster api is not deployed!")
}
} | clusterinfrastructure | |||||
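For context, here is a minimal usage sketch of the guard above. It is an invented spec (the Describe text, CLI name, and It title are assumptions, not tests from this repo) showing the common pattern of calling skipForCAPINotExist at the top of a CAPI spec so the whole test is skipped on clusters without cluster-api:
package clusterinfrastructure
import (
g "github.com/onsi/ginkgo/v2"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)
var _ = g.Describe("[sig-cluster-lifecycle] Cluster_Infrastructure CAPI sketch", func() {
defer g.GinkgoRecover()
var oc = exutil.NewCLI("capi-sketch", exutil.KubeConfigPath())
g.It("runs only when cluster-api is deployed", func() {
skipForCAPINotExist(oc) // skips the spec when no deployment exists in clusterAPINamespace
// ... CAPI assertions would follow here ...
})
})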
function | openshift/openshift-tests-private | 5aa71ae6-7b14-4163-8d4a-b282b47a37d4 | createCluster | ['clusterDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi_utils.go | func (cluster *clusterDescription) createCluster(oc *exutil.CLI) {
e2e.Logf("Creating cluster ...")
err := applyResourceFromTemplate(oc, "-f", cluster.template, "-p", "NAME="+cluster.name, "NAMESPACE="+clusterAPINamespace, "KIND="+cluster.kind)
o.Expect(err).NotTo(o.HaveOccurred())
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | e922309a-fadb-4589-8326-19c8e232eb73 | createClusterNotInCapiNamespace | ['clusterDescriptionNotInCapi'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi_utils.go | func (clusterNotInCapi *clusterDescriptionNotInCapi) createClusterNotInCapiNamespace(oc *exutil.CLI) {
e2e.Logf("Creating cluster in namepsace not openshift-cluster-api ...")
err := applyResourceFromTemplate(oc, "-f", clusterNotInCapi.template, "-p", "NAME="+clusterNotInCapi.name, "NAMESPACE="+clusterNotInCapi.namespace, "KIND="+clusterNotInCapi.kind)
o.Expect(err).NotTo(o.HaveOccurred())
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 7519e926-c243-4e31-ab21-5325532d8ce6 | createAWSMachineTemplate | ['"strconv"'] | ['awsMachineTemplateDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi_utils.go | func (awsMachineTemplate *awsMachineTemplateDescription) createAWSMachineTemplate(oc *exutil.CLI) {
e2e.Logf("Creating awsMachineTemplate ...")
if awsMachineTemplate.placementGroupPartition != 0 {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", awsMachineTemplate.template, "-p", "NAME="+awsMachineTemplate.name, "NAMESPACE="+clusterAPINamespace, "PROFILE="+awsMachineTemplate.profile, "INSTANCETYPE="+awsMachineTemplate.instanceType, "ZONE="+awsMachineTemplate.zone, "AMI="+awsMachineTemplate.ami, "SUBNETNAME="+awsMachineTemplate.subnetName, "SUBNETID="+awsMachineTemplate.subnetID, "SGNAME="+awsMachineTemplate.sgName, "PLACEMENTGROUPNAME="+awsMachineTemplate.placementGroupName, "PLACEMENTGROUPPARTITION="+strconv.Itoa(awsMachineTemplate.placementGroupPartition))
o.Expect(err).NotTo(o.HaveOccurred())
} else {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", awsMachineTemplate.template, "-p", "NAME="+awsMachineTemplate.name, "NAMESPACE="+clusterAPINamespace, "PROFILE="+awsMachineTemplate.profile, "INSTANCETYPE="+awsMachineTemplate.instanceType, "ZONE="+awsMachineTemplate.zone, "AMI="+awsMachineTemplate.ami, "SUBNETNAME="+awsMachineTemplate.subnetName, "SUBNETID="+awsMachineTemplate.subnetID, "SGNAME="+awsMachineTemplate.sgName, "PLACEMENTGROUPNAME="+awsMachineTemplate.placementGroupName, "PLACEMENTGROUPPARTITION=null")
o.Expect(err).NotTo(o.HaveOccurred())
}
} | clusterinfrastructure | |||
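A hedged caller-side sketch of the branch above (all field values are invented for illustration; only the struct fields referenced in this function are assumed): setting a non-zero placementGroupPartition renders the PLACEMENTGROUPPARTITION parameter via strconv.Itoa, while the zero value renders it as null.
awsTemplate := awsMachineTemplateDescription{
template: filepath.Join(capiBaseDir, "machinetemplate-aws.yaml"), // hypothetical fixture path
name: "aws-template-partition",
profile: "example-worker-profile",
instanceType: "m6i.xlarge",
zone: "us-east-2a",
ami: "ami-0123456789abcdef0",
subnetName: "example-subnet",
sgName: "example-sg",
placementGroupName: "pgpartition3",
placementGroupPartition: 3, // non-zero: takes the strconv.Itoa branch above
}
defer awsTemplate.deleteAWSMachineTemplate(oc)
awsTemplate.createAWSMachineTemplate(oc)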
function | openshift/openshift-tests-private | e6c32705-2202-42b5-a69a-031b05663fc0 | deleteAWSMachineTemplate | ['awsMachineTemplateDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi_utils.go | func (awsMachineTemplate *awsMachineTemplateDescription) deleteAWSMachineTemplate(oc *exutil.CLI) error {
e2e.Logf("Deleting awsMachineTemplate ...")
return oc.AsAdmin().WithoutNamespace().Run("delete").Args("awsmachinetemplate", awsMachineTemplate.name, "-n", clusterAPINamespace).Execute()
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | ec5482be-59cd-4fba-a8d8-78957cf9d407 | createGCPMachineTemplate | ['gcpMachineTemplateDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi_utils.go | func (gcpMachineTemplate *gcpMachineTemplateDescription) createGCPMachineTemplate(oc *exutil.CLI) {
e2e.Logf("Creating gcpMachineTemplate ...")
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", gcpMachineTemplate.template, "-p", "NAME="+gcpMachineTemplate.name, "NAMESPACE="+clusterAPINamespace, "IMAGE="+gcpMachineTemplate.image, "REGION="+gcpMachineTemplate.region, "CLUSTERID="+gcpMachineTemplate.clusterID, "MACHINETYPE="+gcpMachineTemplate.machineType, "SUBNETWORK="+gcpMachineTemplate.subnetwork, "SERVICEACCOUNT="+gcpMachineTemplate.serviceAccount)
o.Expect(err).NotTo(o.HaveOccurred())
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 5056d980-ff77-42c2-85fd-b67f7f60e715 | createGCPMachineTemplatePdBal | ['gcpMachineTemplateDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi_utils.go | func (gcpMachineTemplatepdbal *gcpMachineTemplateDescription) createGCPMachineTemplatePdBal(oc *exutil.CLI) {
e2e.Logf("Creating gcpMachineTemplate with pd balanced disk...")
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", gcpMachineTemplatepdbal.template, "-p", "NAME="+gcpMachineTemplatepdbal.name, "NAMESPACE="+clusterAPINamespace, "IMAGE="+gcpMachineTemplatepdbal.image, "REGION="+gcpMachineTemplatepdbal.region, "CLUSTERID="+gcpMachineTemplatepdbal.clusterID, "MACHINETYPE="+gcpMachineTemplatepdbal.machineType, "SUBNETWORK="+gcpMachineTemplatepdbal.subnetwork, "SERVICEACCOUNT="+gcpMachineTemplatepdbal.serviceAccount)
o.Expect(err).NotTo(o.HaveOccurred())
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 964ccef8-a308-46e4-84fc-8dcc36223b02 | deleteGCPMachineTemplate | ['gcpMachineTemplateDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi_utils.go | func (gcpMachineTemplate *gcpMachineTemplateDescription) deleteGCPMachineTemplate(oc *exutil.CLI) error {
e2e.Logf("Deleting gcpMachineTemplate ...")
return oc.AsAdmin().WithoutNamespace().Run("delete").Args("gcpmachinetemplate", gcpMachineTemplate.name, "-n", clusterAPINamespace).Execute()
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 6c428e80-3e3a-4d32-ac3c-8ed70526e6bb | createCapiMachineSet | ['"strconv"'] | ['capiMachineSetAWSDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi_utils.go | func (capiMachineSetAWS *capiMachineSetAWSDescription) createCapiMachineSet(oc *exutil.CLI) {
e2e.Logf("Creating awsMachineSet ...")
if err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", capiMachineSetAWS.template, "-p", "NAME="+capiMachineSetAWS.name, "NAMESPACE="+clusterAPINamespace, "CLUSTERNAME="+capiMachineSetAWS.clusterName, "MACHINETEMPLATENAME="+capiMachineSetAWS.machineTemplateName, "KIND="+capiMachineSetAWS.kind, "REPLICAS="+strconv.Itoa(capiMachineSetAWS.replicas)); err != nil {
capiMachineSetAWS.deleteCapiMachineSet(oc)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
waitForCapiMachinesRunning(oc, capiMachineSetAWS.replicas, capiMachineSetAWS.name)
}
} | clusterinfrastructure | |||
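A short usage sketch with placeholder values (the machineset name, template path, and clusterName are assumptions): the disappearance wait is deferred first so it runs last, after the deferred delete has been issued, and the delete is deferred before creation so cleanup happens even if creation fails partway.
ms := capiMachineSetAWSDescription{
template: filepath.Join(capiBaseDir, "machineset-aws.yaml"), // hypothetical fixture path
name: "capi-machineset-sketch",
clusterName: clusterName, // assumed to hold the infrastructure name
machineTemplateName: "aws-template-partition",
kind: "AWSMachineTemplate",
replicas: 1,
}
defer waitForCapiMachinesDisapper(oc, ms.name) // runs last: blocks until machines are gone
defer ms.deleteCapiMachineSet(oc) // runs first: issues the delete
ms.createCapiMachineSet(oc)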
function | openshift/openshift-tests-private | da83b151-b581-48d5-8e95-16a1f70bbeee | deleteCapiMachineSet | ['capiMachineSetAWSDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi_utils.go | func (capiMachineSetAWS *capiMachineSetAWSDescription) deleteCapiMachineSet(oc *exutil.CLI) error {
e2e.Logf("Deleting awsMachineSet ...")
return oc.AsAdmin().WithoutNamespace().Run("delete").Args(capiMachineset, capiMachineSetAWS.name, "-n", clusterAPINamespace).Execute()
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | faae7fd9-f598-4579-afb9-26285cbbac27 | createCapiMachineSetgcp | ['"strconv"'] | ['capiMachineSetgcpDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi_utils.go | func (capiMachineSetgcp *capiMachineSetgcpDescription) createCapiMachineSetgcp(oc *exutil.CLI) {
e2e.Logf("Creating gcpMachineSet ...")
if err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", capiMachineSetgcp.template, "-p", "NAME="+capiMachineSetgcp.name, "NAMESPACE="+clusterAPINamespace, "CLUSTERNAME="+capiMachineSetgcp.clusterName, "MACHINETEMPLATENAME="+capiMachineSetgcp.machineTemplateName, "KIND="+capiMachineSetgcp.kind, "FAILUREDOMAIN="+capiMachineSetgcp.failureDomain, "REPLICAS="+strconv.Itoa(capiMachineSetgcp.replicas)); err != nil {
capiMachineSetgcp.deleteCapiMachineSetgcp(oc)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
waitForCapiMachinesRunning(oc, capiMachineSetgcp.replicas, capiMachineSetgcp.name)
}
} | clusterinfrastructure | |||
function | openshift/openshift-tests-private | 6b42621f-505b-4f41-91c9-6ce377cbd2a8 | deleteCapiMachineSetgcp | ['capiMachineSetgcpDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi_utils.go | func (capiMachineSetgcp *capiMachineSetgcpDescription) deleteCapiMachineSetgcp(oc *exutil.CLI) error {
e2e.Logf("Deleting gcpMachineSet ...")
return oc.AsAdmin().WithoutNamespace().Run("delete").Args(capiMachineset, capiMachineSetgcp.name, "-n", clusterAPINamespace).Execute()
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | e4c348fd-ba11-4bc5-8a0b-c76d7515b446 | createvsphereMachineTemplate | ['"strconv"'] | ['vsphereMachineTemplateDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi_utils.go | func (vsphereMachineTemplate *vsphereMachineTemplateDescription) createvsphereMachineTemplate(oc *exutil.CLI) {
e2e.Logf("Creating vsphereMachineTemplate ...")
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", vsphereMachineTemplate.template, "-p", "KIND="+vsphereMachineTemplate.kind, "NAME="+vsphereMachineTemplate.name, "NAMESPACE="+clusterAPINamespace, "VSPHERE_SERVER="+vsphereMachineTemplate.server, "DISKGIB="+strconv.Itoa(vsphereMachineTemplate.diskGiB), "CLONEMODE="+"linkedClone", "DATASTORE="+vsphereMachineTemplate.datastore, "DATACENTER="+vsphereMachineTemplate.datacenter, "FOLDER="+vsphereMachineTemplate.folder, "RESOURCEPOOL="+vsphereMachineTemplate.resourcePool, "NUMCPUS="+strconv.Itoa(vsphereMachineTemplate.numCPUs), "MEMORYMIB="+strconv.Itoa(vsphereMachineTemplate.memoryMiB), "NETWORKNAME="+vsphereMachineTemplate.networkName, "MACHINETEMPLATE="+vsphereMachineTemplate.machineTemplate)
o.Expect(err).NotTo(o.HaveOccurred())
} | clusterinfrastructure | |||
function | openshift/openshift-tests-private | ca4f70c4-45dd-4510-a4ea-bfc1efaa523d | deletevsphereMachineTemplate | ['vsphereMachineTemplateDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi_utils.go | func (vsphereMachineTemplate *vsphereMachineTemplateDescription) deletevsphereMachineTemplate(oc *exutil.CLI) error {
e2e.Logf("Deleting vsphereMachineTemplate ...")
return oc.AsAdmin().WithoutNamespace().Run("delete").Args("VSpheremachinetemplate", vsphereMachineTemplate.name, "-n", clusterAPINamespace).Execute()
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | d57876ab-7e05-4054-875b-d48de01ebcbf | createCapiMachineSetvsphere | ['"strconv"'] | ['capiMachineSetvsphereDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi_utils.go | func (capiMachineSetvsphere *capiMachineSetvsphereDescription) createCapiMachineSetvsphere(oc *exutil.CLI) {
e2e.Logf("Creating vsphereMachineSet ...")
if err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", capiMachineSetvsphere.template, "-p", "NAME="+capiMachineSetvsphere.name, "NAMESPACE="+clusterAPINamespace, "CLUSTERNAME="+capiMachineSetvsphere.clusterName, "MACHINETEMPLATENAME="+capiMachineSetvsphere.machineTemplateName, "KIND="+capiMachineSetvsphere.kind, "DATASECRET="+capiMachineSetvsphere.dataSecretName, "REPLICAS="+strconv.Itoa(capiMachineSetvsphere.replicas)); err != nil {
capiMachineSetvsphere.deleteCapiMachineSetvsphere(oc)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
waitForCapiMachinesRunning(oc, capiMachineSetvsphere.replicas, capiMachineSetvsphere.name)
}
} | clusterinfrastructure | |||
function | openshift/openshift-tests-private | 3c83ea36-1f65-4684-b1b4-1dff3470a351 | deleteCapiMachineSetvsphere | ['capiMachineSetvsphereDescription'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi_utils.go | func (capiMachineSetvsphere *capiMachineSetvsphereDescription) deleteCapiMachineSetvsphere(oc *exutil.CLI) error {
e2e.Logf("Deleting vsphereMachineSet ...")
return oc.AsAdmin().WithoutNamespace().Run("delete").Args(capiMachineset, capiMachineSetvsphere.name, "-n", clusterAPINamespace).Execute()
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 9d0302a8-3e38-43cb-bd66-010630203877 | waitForCapiMachinesRunning | ['"fmt"', '"strconv"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi_utils.go | func waitForCapiMachinesRunning(oc *exutil.CLI, machineNumber int, machineSetName string) {
e2e.Logf("Waiting for the machines Running ...")
if machineNumber >= 1 {
// Wait 180 seconds first, as it uses total 1200 seconds in wait.poll, it may not be enough for some platform(s)
time.Sleep(180 * time.Second)
}
pollErr := wait.Poll(60*time.Second, 1200*time.Second, func() (bool, error) {
msg, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(capiMachineset, machineSetName, "-o=jsonpath={.status.readyReplicas}", "-n", clusterAPINamespace).Output()
machinesRunning, _ := strconv.Atoi(msg)
if machinesRunning != machineNumber {
e2e.Logf("Expected %v machine are not Running yet and waiting up to 1 minutes ...", machineNumber)
return false, nil
}
e2e.Logf("Expected %v machines are Running", machineNumber)
return true, nil
})
exutil.AssertWaitPollNoErr(pollErr, fmt.Sprintf("Expected %v machines did not all reach Running within 20 minutes", machineNumber))
e2e.Logf("All machines are Running ...")
} | clusterinfrastructure | ||||
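The helper above is an instance of the interval/timeout polling pattern from k8s.io/apimachinery. Below is a self-contained sketch of just that pattern, with a toy condition instead of a cluster query (note that wait.Poll is deprecated in newer apimachinery releases in favor of wait.PollUntilContextTimeout, but this matches the style used here):
package main
import (
"fmt"
"time"
"k8s.io/apimachinery/pkg/util/wait"
)
func main() {
readyReplicas := 0
// Poll every second for up to ten seconds; returning (true, nil) stops the
// poll successfully, and a non-nil error aborts it immediately.
err := wait.Poll(1*time.Second, 10*time.Second, func() (bool, error) {
readyReplicas++ // stand-in for reading .status.readyReplicas
return readyReplicas >= 3, nil
})
fmt.Printf("readyReplicas=%d err=%v\n", readyReplicas, err)
}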
function | openshift/openshift-tests-private | 05a02e91-2234-4019-9f52-0d74be5b40f3 | waitForCapiMachinesDisapper | ['"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi_utils.go | func waitForCapiMachinesDisapper(oc *exutil.CLI, machineSetName string) {
e2e.Logf("Waiting for the machines Dissapper ...")
err := wait.Poll(60*time.Second, 1200*time.Second, func() (bool, error) {
machineNames, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(capiMachine, "-o=jsonpath={.items[*].metadata.name}", "-l", "cluster.x-k8s.io/set-name="+machineSetName, "-n", clusterAPINamespace).Output()
if machineNames != "" {
e2e.Logf(" Still have machines are not Disappered yet and waiting up to 1 minutes ...")
return false, nil
}
e2e.Logf("All machines are Disappered")
return true, nil
})
exutil.AssertWaitPollNoErr(err, "Wait machine disappear failed.")
} | clusterinfrastructure | ||||
function | openshift/openshift-tests-private | 2f3a0293-cb52-4b00-97b4-ec859fb5bb5c | waitForCapiMachinesDisappergcp | ['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi_utils.go | func waitForCapiMachinesDisappergcp(oc *exutil.CLI, machineSetName string) {
e2e.Logf("Waiting for the machines Dissapper ...")
err := wait.Poll(60*time.Second, 1200*time.Second, func() (bool, error) {
machineNames, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(capiMachine, "-o=jsonpath={.items[*].metadata.name}", "-n", clusterAPINamespace).Output()
if strings.Contains(machineNames, machineSetName) {
e2e.Logf(" Still have machines are not Disappered yet and waiting up to 1 minutes ...")
return false, nil
}
e2e.Logf("All machines are Disappered")
return true, nil
})
exutil.AssertWaitPollNoErr(err, "Wait machine disappear failed.")
} | clusterinfrastructure | ||||
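Note the difference from the variant above it: this one lists all machines in the namespace and substring-matches their names against the machineset name, instead of filtering by the cluster.x-k8s.io/set-name label. A tiny self-contained sketch of why substring matching is the looser check (pure string logic, hypothetical machine names, no cluster needed):
package main
import (
"fmt"
"strings"
)
func main() {
// Hypothetical machine names from two different machinesets.
machineNames := "capi-ms-a-abc12 capi-ms-ab-def34"
// A substring search for "capi-ms-a" also matches "capi-ms-ab-def34",
// which is why the exact label selector is the stricter filter.
fmt.Println(strings.Contains(machineNames, "capi-ms-a")) // true
}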
function | openshift/openshift-tests-private | 8d08705d-45bf-4ca7-a8c0-7375db1d60d6 | matchProviderIDWithNode | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/capi_utils.go | func matchProviderIDWithNode(oc *exutil.CLI, resourceType, resourceName, namespace string) (bool, error) {
machineProviderID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(resourceType, resourceName, "-o=jsonpath={.spec.providerID}", "-n", namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(resourceType, resourceName, "-o=jsonpath={.status.nodeRef.name}", "-n", namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeProviderID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeName, "-o=jsonpath={.spec.providerID}", "-n", namespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if machineProviderID != nodeProviderID {
e2e.Logf("Node and machine provider IDs do not match")
return false, err
}
e2e.Logf("Node and machine provider IDs match")
return true, nil
} | clusterinfrastructure | |||||
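A brief, assumed usage sketch (machineName is a placeholder for an existing CAPI machine, not a value from this repo): callers typically assert on both return values.
// machineName is assumed to name an existing machine in clusterAPINamespace.
matched, err := matchProviderIDWithNode(oc, capiMachine, machineName, clusterAPINamespace)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(matched).To(o.BeTrue())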
test | openshift/openshift-tests-private | 581e3908-e2b6-429b-8cae-55e9288821ca | ccm | import (
"encoding/base64"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm.go | package clusterinfrastructure
import (
"encoding/base64"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-cluster-lifecycle] Cluster_Infrastructure CCM", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("cloud-controller-manager", exutil.KubeConfigPath())
iaasPlatform clusterinfra.PlatformType
)
g.BeforeEach(func() {
iaasPlatform = clusterinfra.CheckPlatform(oc)
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-High-42927-CCM should honour cluster wide proxy settings", func() {
g.By("Check if it's a proxy cluster")
httpProxy, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("proxy/cluster", "-o=jsonpath={.spec.httpProxy}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
httpsProxy, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("proxy/cluster", "-o=jsonpath={.spec.httpsProxy}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if len(httpProxy) == 0 && len(httpsProxy) == 0 {
g.Skip("Skip for non-proxy cluster!")
}
g.By("Check if cloud-controller-manager is deployed")
ccm, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deploy", "-n", "openshift-cloud-controller-manager", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if len(ccm) == 0 {
g.Skip("Skip for cloud-controller-manager is not deployed!")
}
g.By("Check the proxy info for the cloud-controller-manager deployment")
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deploy", ccm, "-n", "openshift-cloud-controller-manager", "-o=jsonpath={.spec.template.spec.containers[0].env}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).To(o.ContainSubstring("\"name\":\"HTTP_PROXY\",\"value\":\"" + httpProxy + "\""))
o.Expect(out).To(o.ContainSubstring("\"name\":\"HTTPS_PROXY\",\"value\":\"" + httpsProxy + "\""))
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-High-43307-cloud-controller-manager clusteroperator should be in Available state", func() {
g.By("Check cluster does not have basecap set as None")
baseCapSet, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("clusterversion", "-o=jsonpath={.items[*].spec.capabilities.baselineCapabilitySet}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if baseCapSet == "None" {
g.Skip("Skip test when ccm co is not available")
}
state, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperator/cloud-controller-manager", "-o=jsonpath={.status.conditions[?(@.type==\"Available\")].status}{.status.conditions[?(@.type==\"Progressing\")].status}{.status.conditions[?(@.type==\"Degraded\")].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(state).To(o.ContainSubstring("TrueFalseFalse"))
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Medium-42879-Cloud-config configmap should be copied and kept in sync within the CCCMO namespace [Disruptive]", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure, clusterinfra.VSphere)
g.By("Check if cloud-config cm is copied to openshift-cloud-controller-manager namespace")
ccmCM, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm", "-n", "openshift-cloud-controller-manager", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(ccmCM).To(o.ContainSubstring("cloud-conf"))
g.By("Check if the sync is working correctly")
cmBeforePatch, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm/cloud-conf", "-n", "openshift-cloud-controller-manager", "-o=jsonpath={.data.cloud\\.conf}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("cm/cloud-conf", "-n", "openshift-cloud-controller-manager", "-p", `{"data":{"cloud.conf": "invalid"}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
cmAfterPatch, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm/cloud-conf", "-n", "openshift-cloud-controller-manager", "-o=jsonpath={.data.cloud\\.conf}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(cmBeforePatch).Should(o.Equal(cmAfterPatch))
})
// author: [email protected]
g.It("Author:miyadav-NonHyperShiftHOST-Medium-63829-Target workload annotation should be present in deployments of ccm ", func() {
SkipIfCloudControllerManagerNotDeployed(oc)
checkDeployments := []struct {
namespace string
deployment string
}{
{
namespace: "openshift-controller-manager",
deployment: "controller-manager",
},
{
namespace: "openshift-controller-manager-operator",
deployment: "openshift-controller-manager-operator",
},
}
for _, checkDeployment := range checkDeployments {
g.By("Check target.workload annotation is present in yaml definition of deployment - " + checkDeployment.deployment)
WorkloadAnnotation, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment", checkDeployment.deployment, "-n", checkDeployment.namespace, "-o=jsonpath={.spec.template.metadata.annotations}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(WorkloadAnnotation).To(o.ContainSubstring("\"target.workload.openshift.io/management\":\"{\\\"effect\\\": \\\"PreferredDuringScheduling\\\"}"))
}
})
// author: [email protected]
g.It("Author:miyadav-NonHyperShiftHOST-Critical-64657-Alibaba clusters are TechPreview and should not be upgradeable", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AlibabaCloud)
SkipIfCloudControllerManagerNotDeployed(oc)
g.By("Check cluster is TechPreview and should not be upgradeable")
msg, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co", "cloud-controller-manager", "-o=jsonpath={.status.conditions[*]}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(msg).To(o.ContainSubstring("Alibaba platform is currently tech preview, upgrades are not allowed"))
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Medium-70019-Security Group and rules resource should be deleted when deleting a Ingress Controller", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
// skip on UPI because there is a bug: https://issues.redhat.com/browse/OCPBUGS-8213
clusterinfra.SkipConditionally(oc)
ccmBaseDir := exutil.FixturePath("testdata", "clusterinfrastructure", "ccm")
ingressControllerTemplate := filepath.Join(ccmBaseDir, "ingressController70019.yaml")
ingressController := ingressControllerDescription{
template: ingressControllerTemplate,
name: "test-swtch-lb",
}
g.By("Create ingressController")
defer ingressController.deleteIngressController(oc)
ingressController.createIngressController(oc)
g.By("Get the dns")
var dns string
err := wait.Poll(2*time.Second, 60*time.Second, func() (bool, error) {
dnsfetched, dnsErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("DNSRecord", ingressController.name+"-wildcard", "-n", "openshift-ingress-operator", "-o=jsonpath={.spec.targets[0]}").Output()
if dnsErr != nil {
e2e.Logf("hasn't got the dns ...")
return false, nil
}
dns = dnsfetched
e2e.Logf("got the dns, dns is: %s", dns)
return true, nil
})
exutil.AssertWaitPollNoErr(err, "got the dns failed")
dnskeys := strings.Split(dns, "-")
groupname := "k8s-elb-" + dnskeys[1]
e2e.Logf("groupname: %s", groupname)
g.By("Get the security group id")
clusterinfra.GetAwsCredentialFromCluster(oc)
awsClient := exutil.InitAwsSession()
sg, err := awsClient.GetSecurityGroupByGroupName(groupname)
if sg == nil {
g.Skip("The profile might not have SecurityGrp for router-default")
}
o.Expect(err).NotTo(o.HaveOccurred())
sgId := *sg.GroupId
e2e.Logf("sgId: %s", sgId)
ingressController.deleteIngressController(oc)
g.By("Wait the dns deleted")
err = wait.Poll(2*time.Second, 30*time.Second, func() (bool, error) {
dnsfetched, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("DNSRecord", ingressController.name+"-wildcard", "-n", "openshift-ingress-operator", "-o=jsonpath={.spec.targets[0]}").Output()
if strings.Contains(dnsfetched, "NotFound") {
e2e.Logf("dns has been deleted")
return true, nil
}
e2e.Logf("still can get the dns, dns is: %s", dnsfetched)
return false, nil
})
exutil.AssertWaitPollNoErr(err, "wait the dns delete failed")
g.By("Check the security group has also been deleted")
err = wait.Poll(2*time.Second, 30*time.Second, func() (bool, error) {
sg, err1 := awsClient.GetSecurityGroupByGroupID(sgId)
if err1 != nil {
if strings.Contains(err1.Error(), "InvalidGroup.NotFound") {
e2e.Logf("security group has been deleted")
return true, nil
}
e2e.Logf("error: %s", err1.Error())
return false, nil
}
e2e.Logf("still can get the security group, sgId is: %s", *sg.GroupId)
return false, nil
})
exutil.AssertWaitPollNoErr(err, "wait the security group delete failed")
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Medium-70296-AWS should not use external-cloud-volume-plugin post CSI migration", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
cmKubeControllerManager, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm", "config", "-n", "openshift-kube-controller-manager", "-o=yaml").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(cmKubeControllerManager).NotTo(o.ContainSubstring("external-cloud-volume-plugin"))
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-LEVEL0-Critical-70618-The new created nodes should be added to load balancer [Disruptive][Slow]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP, clusterinfra.IBMCloud, clusterinfra.AlibabaCloud)
var newNodeNames []string
g.By("Create a new machineset")
infrastructureName := clusterinfra.GetInfrastructureName(oc)
machinesetName := infrastructureName + "-70618"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer func() {
err := waitForClusterOperatorsReady(oc, "ingress", "console", "authentication")
exutil.AssertWaitPollNoErr(err, "co recovery fails!")
}()
defer func() {
err := waitForPodWithLabelReady(oc, "openshift-ingress", "ingresscontroller.operator.openshift.io/deployment-ingresscontroller=default")
exutil.AssertWaitPollNoErr(err, "pod recovery fails!")
}()
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":2,"template":{"spec":{"taints":null}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 2, machinesetName)
machineNames := clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)
newNodeNames = append(newNodeNames, clusterinfra.GetNodeNameFromMachine(oc, machineNames[0]))
newNodeNames = append(newNodeNames, clusterinfra.GetNodeNameFromMachine(oc, machineNames[1]))
newNodeNameStr := newNodeNames[0] + " " + newNodeNames[1]
e2e.Logf("newNodeNames: %s", newNodeNameStr)
for _, value := range newNodeNames {
err := oc.AsAdmin().WithoutNamespace().Run("label").Args("node", value, "testcase=70618").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
err = oc.AsAdmin().WithoutNamespace().Run("annotate").Args("ns", "openshift-ingress", `scheduler.alpha.kubernetes.io/node-selector=testcase=70618`).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("annotate").Args("ns", "openshift-ingress", `scheduler.alpha.kubernetes.io/node-selector-`).Execute()
g.By("Delete router pods and to make new ones running on new workers")
routerPodNameStr, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[*].metadata.name}", "-n", "openshift-ingress").Output()
o.Expect(err).NotTo(o.HaveOccurred())
routerPodNames := strings.Split(routerPodNameStr, " ")
g.By("Delete old router pods")
for _, value := range routerPodNames {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", value, "-n", "openshift-ingress").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
g.By("Wait old router pods disappear")
for _, value := range routerPodNames {
err = waitForResourceToDisappear(oc, "openshift-ingress", "pod/"+value)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Router %v failed to fully terminate", "pod/"+value))
}
g.By("Wait new router pods ready")
err = waitForPodWithLabelReady(oc, "openshift-ingress", "ingresscontroller.operator.openshift.io/deployment-ingresscontroller=default")
exutil.AssertWaitPollNoErr(err, "new router pod failed to be ready state within allowed time!")
newRouterPodOnNodeStr, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[*].spec.nodeName}", "-n", "openshift-ingress").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("newRouterPodOnNodeStr: %s", newRouterPodOnNodeStr)
newRouterPodOnNodes := strings.Split(newRouterPodOnNodeStr, " ")
g.By("Check new router pods running on new workers")
for _, value := range newRouterPodOnNodes {
o.Expect(strings.Contains(newNodeNameStr, value)).To(o.BeTrue())
}
g.By("Check co ingress console authentication are good")
err = waitForClusterOperatorsReady(oc, "ingress", "console", "authentication")
exutil.AssertWaitPollNoErr(err, "some co failed to be ready state within allowed time!")
})
// author: [email protected]
g.It("Author:zhsun-High-70620-Region and zone labels should be available on the nodes", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP, clusterinfra.IBMCloud, clusterinfra.OpenStack)
if iaasPlatform == clusterinfra.Azure {
azureStackCloud, azureErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.azure.cloudName}").Output()
o.Expect(azureErr).NotTo(o.HaveOccurred())
if azureStackCloud == "AzureStackCloud" {
g.Skip("Skip for ASH due to we went straight to the CCM for ASH, so won't have the old labels!")
}
}
nodeLabel, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "--show-labels").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(nodeLabel, "failure-domain.beta.kubernetes.io/region") && strings.Contains(nodeLabel, "topology.kubernetes.io/region") && strings.Contains(nodeLabel, "failure-domain.beta.kubernetes.io/zone") && strings.Contains(nodeLabel, "topology.kubernetes.io/zone")).To(o.BeTrue())
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-High-70744-Pull images from ECR repository [Disruptive]", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
clusterinfra.SkipForAwsOutpostCluster(oc)
region, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.aws.region}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create a ECR repository and get authorization token")
clusterinfra.GetAwsCredentialFromCluster(oc)
infrastructureName := clusterinfra.GetInfrastructureName(oc)
registryName := "ecr-" + infrastructureName
ecrClient := exutil.NewECRClient(region)
repositoryUri, err := ecrClient.CreateContainerRepository(registryName)
if err != nil {
g.Skip("unable to create container registry: " + err.Error())
}
defer func() {
err := ecrClient.DeleteContainerRepository(registryName)
o.Expect(err).NotTo(o.HaveOccurred())
}()
password, _ := ecrClient.GetAuthorizationToken()
o.Expect(password).NotTo(o.BeEmpty())
auth, err := exec.Command("bash", "-c", fmt.Sprintf("echo %s | base64 -d", password)).Output()
if err != nil {
g.Skip("unable to get authorization token: " + err.Error())
}
g.By("Mirror an image to ECR")
tempDataDir, err := extractPullSecret(oc)
defer os.RemoveAll(tempDataDir)
o.Expect(err).NotTo(o.HaveOccurred())
originAuth := filepath.Join(tempDataDir, ".dockerconfigjson")
authFile, err := appendPullSecretAuth(originAuth, strings.Split(repositoryUri, "/")[0], "", string(auth))
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("image").Args("mirror", "quay.io/openshifttest/pause@sha256:e481caec2eb984ce023673a3ec280bf57dea8c0305009e246b019b3eef044f9e", repositoryUri+":latest", "--insecure", "-a", authFile, "--keep-manifest-list=true").Execute()
if err != nil {
g.Skip("unable to mirror image to ECR: " + err.Error())
}
g.By("Add the AmazonEC2ContainerRegistryReadOnly policy to the worker nodes")
roleName := ""
if exutil.IsSNOCluster(oc) {
roleName = infrastructureName + "-master-role"
} else {
roleName = infrastructureName + "-worker-role"
}
policyArn := "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
iamClient := exutil.NewIAMClient()
err = iamClient.AttachRolePolicy(roleName, policyArn)
if err != nil {
g.Skip("unable to attach role policy: " + err.Error())
}
defer iamClient.DetachRolePolicy(roleName, policyArn)
g.By("Create a new project for testing")
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", "hello-ecr70744").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", "hello-ecr70744", "--ignore-not-found", "--force").Execute()
g.By("Create a new app using the image on ECR")
err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("--name=hello-ecr", "--image="+repositoryUri+":latest", "--allow-missing-images", "-n", "hello-ecr70744").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Wait the pod ready")
err = waitForPodWithLabelReady(oc, "hello-ecr70744", "deployment=hello-ecr")
exutil.AssertWaitPollNoErr(err, "the pod failed to be ready state within allowed time!")
})
// author: [email protected]
g.It("Author:zhsun-LEVEL0-Critical-70627-Service of type LoadBalancer can be created successful", func() {
clusterinfra.SkipForAwsOutpostCluster(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP, clusterinfra.IBMCloud, clusterinfra.AlibabaCloud)
if iaasPlatform == clusterinfra.AWS && strings.HasPrefix(getClusterRegion(oc), "us-iso") {
g.Skip("Skipped: There is no public subnet on AWS C2S/SC2S disconnected clusters!")
}
ccmBaseDir := exutil.FixturePath("testdata", "clusterinfrastructure", "ccm")
loadBalancer := filepath.Join(ccmBaseDir, "svc-loadbalancer.yaml")
loadBalancerService := loadBalancerServiceDescription{
template: loadBalancer,
name: "svc-loadbalancer-70627",
namespace: oc.Namespace(),
}
g.By("Create loadBalancerService")
defer loadBalancerService.deleteLoadBalancerService(oc)
loadBalancerService.createLoadBalancerService(oc)
g.By("Check External-IP assigned")
getLBSvcIP(oc, loadBalancerService)
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-High-71492-Create CLB service on aws outposts cluster [Disruptive]", func() {
clusterinfra.SkipForNotAwsOutpostMixedCluster(oc)
exutil.By("1.1Get regular worker public subnetID")
region, err := exutil.GetAWSClusterRegion(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
clusterinfra.GetAwsCredentialFromCluster(oc)
awsClient := exutil.InitAwsSessionWithRegion(region)
clusterID := clusterinfra.GetInfrastructureName(oc)
subnetId, err := awsClient.GetAwsPublicSubnetID(clusterID)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Subnet -->: %s", subnetId)
exutil.By("1.2Create loadBalancerService and pod")
lbNamespace := "ns-71492"
defer oc.DeleteSpecifiedNamespaceAsAdmin(lbNamespace)
oc.CreateSpecifiedNamespaceAsAdmin(lbNamespace)
exutil.SetNamespacePrivileged(oc, lbNamespace)
ccmBaseDir := exutil.FixturePath("testdata", "clusterinfrastructure", "ccm")
svc := filepath.Join(ccmBaseDir, "svc-loadbalancer-with-annotations.yaml")
pod := filepath.Join(ccmBaseDir, "pod.yaml")
svcForSubnet := loadBalancerServiceDescription{
template: svc,
name: "test-subnet-annotation",
awssubnet: subnetId,
namespace: lbNamespace,
}
defer svcForSubnet.deleteLoadBalancerService(oc)
svcForSubnet.createLoadBalancerService(oc)
podForSubnet := podDescription{
template: pod,
name: "test-subnet-annotation",
namespace: lbNamespace,
}
defer podForSubnet.deletePod(oc)
podForSubnet.createPod(oc)
waitForPodWithLabelReady(oc, lbNamespace, "name=test-subnet-annotation")
exutil.By("1.3Check External-IP assigned")
externalIPForSubnet := getLBSvcIP(oc, svcForSubnet)
e2e.Logf("externalIPForSubnet -->: %s", externalIPForSubnet)
exutil.By("1.4Check result,the svc can be accessed")
waitForLoadBalancerReady(oc, externalIPForSubnet)
exutil.By("2.1Add label for one regular node")
regularNodes := clusterinfra.ListNonOutpostWorkerNodes(oc)
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", regularNodes[0], "key1-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", regularNodes[0], "key1=value1", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("regularnode -->: %s", regularNodes[0])
exutil.By("2.2Create loadBalancerService and pod")
svcForLabel := loadBalancerServiceDescription{
template: svc,
name: "test-label-annotation",
awssubnet: subnetId,
awslabel: "key1=value1",
namespace: lbNamespace,
}
defer svcForLabel.deleteLoadBalancerService(oc)
svcForLabel.createLoadBalancerService(oc)
podForLabel := podDescription{
template: pod,
name: "test-label-annotation",
namespace: lbNamespace,
}
defer podForLabel.deletePod(oc)
podForLabel.createPod(oc)
waitForPodWithLabelReady(oc, lbNamespace, "name=test-label-annotation")
exutil.By("2.3Check External-IP assigned")
externalIPForLabel := getLBSvcIP(oc, svcForLabel)
e2e.Logf("externalIPForLabel -->: %s", externalIPForLabel)
exutil.By("2.4Check result,the svc can be accessed")
waitForLoadBalancerReady(oc, externalIPForLabel)
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-High-72119-Pull images from GCR repository should succeed [Disruptive]", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.GCP)
projectID, err := exutil.GetGcpProjectID(oc)
o.Expect(err).NotTo(o.HaveOccurred())
if projectID != "openshift-qe" {
g.Skip("Skip as no image in projectID" + projectID)
}
g.By("Create a new project for testing")
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", "hello-gcr72119").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", "hello-gcr72119", "--ignore-not-found", "--force").Execute()
g.By("Create a new app using the image on GCR")
err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("--name=hello-gcr", "--image=gcr.io/openshift-qe/hello-gcr:latest", "--allow-missing-images", "-n", "hello-gcr72119").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Wait the pod ready")
err = waitForPodWithLabelReady(oc, "hello-gcr72119", "deployment=hello-gcr")
exutil.AssertWaitPollNoErr(err, "the pod failed to be ready state within allowed time!")
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Medium-70689-CCM pods should restart to react to changes after credentials update [Disruptive]", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.VSphere, clusterinfra.OpenStack)
var secretName, jsonString, patchPath, podLabel string
if iaasPlatform == clusterinfra.VSphere {
secretName = "vsphere-creds"
jsonString = "-o=jsonpath={.data.vcenter\\.devqe\\.ibmc\\.devcluster\\.openshift\\.com\\.password}"
patchPath = `{"data":{"vcenter.devqe.ibmc.devcluster.openshift.com.password": `
podLabel = "infrastructure.openshift.io/cloud-controller-manager=VSphere"
} else {
secretName = "openstack-credentials"
jsonString = "-o=jsonpath={.data.clouds\\.yaml}"
patchPath = `{"data":{"clouds.yaml": `
podLabel = "infrastructure.openshift.io/cloud-controller-manager=OpenStack"
}
currentSecret, err := oc.AsAdmin().WithoutNamespace().NotShowInfo().Run("get").Args("secret", secretName, jsonString, "-n", "kube-system").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if currentSecret == "" {
g.Skip("The password jsonString is not the defined one, skip the case!")
}
ccmPodNameStr, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[*].metadata.name}", "-n", "openshift-cloud-controller-manager").Output()
o.Expect(err).NotTo(o.HaveOccurred())
ccmPodNames := strings.Split(ccmPodNameStr, " ")
defer func() {
err := waitForPodWithLabelReady(oc, "openshift-cloud-controller-manager", podLabel)
exutil.AssertWaitPollNoErr(err, "pod recovery fails!")
}()
defer oc.AsAdmin().WithoutNamespace().NotShowInfo().Run("patch").Args("secret", secretName, "-n", "kube-system", "-p", patchPath+`"`+currentSecret+`"}}`, "--type=merge").Output()
_, err = oc.AsAdmin().WithoutNamespace().NotShowInfo().Run("patch").Args("secret", secretName, "-n", "kube-system", "-p", patchPath+`"`+base64.StdEncoding.EncodeToString([]byte(exutil.GetRandomString()))+`"}}`, "--type=merge").Output()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Wait old ccm pods disappear")
for _, value := range ccmPodNames {
err = waitForResourceToDisappear(oc, "openshift-cloud-controller-manager", "pod/"+value)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("CCM %v failed to fully terminate", "pod/"+value))
}
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-High-72120-Pull images from ACR repository should succeed [Disruptive]", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure)
azureCloudName, azureErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.azure.cloudName}").Output()
o.Expect(azureErr).NotTo(o.HaveOccurred())
if azureCloudName == "AzureStackCloud" || azureCloudName == "AzureUSGovernmentCloud" {
g.Skip("Skip for ASH and azure Gov due to we didn't create container registry on them!")
}
if exutil.IsSTSCluster(oc) {
g.Skip("Skip on STS cluster, as MSI not available")
}
exutil.By("Create RoleAssignments for resourcegroup")
infrastructureID := clusterinfra.GetInfrastructureName(oc)
identityName := infrastructureID + "-identity"
resourceGroup, err := exutil.GetAzureCredentialFromCluster(oc)
o.Expect(err).NotTo(o.HaveOccurred())
az, sessErr := exutil.NewAzureSessionFromEnv()
o.Expect(sessErr).NotTo(o.HaveOccurred())
principalId, _ := exutil.GetUserAssignedIdentityPrincipalID(az, resourceGroup, identityName)
roleAssignmentName, scope := "", ""
defer func() {
err := exutil.DeleteRoleAssignments(az, roleAssignmentName, scope)
o.Expect(err).NotTo(o.HaveOccurred())
}()
//AcrPull id is 7f951dda-4ed3-4680-a7ca-43fe172d538d, check from https://learn.microsoft.com/en-us/azure/role-based-access-control/built-in-roles#containers
roleAssignmentName, scope = exutil.GrantRoleToPrincipalIDByResourceGroup(az, principalId, "os4-common", "7f951dda-4ed3-4680-a7ca-43fe172d538d")
exutil.By("Create a new project for testing")
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", "hello-acr72120").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", "hello-acr72120", "--ignore-not-found", "--force").Execute()
exutil.By("Create a new app using the image on ACR")
err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("--name=hello-acr", "--image=zhsunregistry.azurecr.io/hello-acr:latest", "--allow-missing-images", "-n", "hello-acr72120").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Wait the pod ready")
err = waitForPodWithLabelReady(oc, "hello-acr72120", "deployment=hello-acr")
exutil.AssertWaitPollNoErr(err, "the pod failed to be ready state within allowed time!")
})
// author: [email protected]
// Marking the test flaky due to issue https://issues.redhat.com/browse/OCPBUGS-42756
g.It("Author:zhsun-NonHyperShiftHOST-Medium-74047-The cloud-provider and cloud-config flags should be removed from KCM/KAS [Flaky]", func() {
SkipIfCloudControllerManagerNotDeployed(oc)
g.By("Check no `cloud-provider` and `cloud-config` set on KCM and KAS")
kapi, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm/config", "-n", "openshift-kube-apiserver", "-o=jsonpath={.data.config\\.yaml}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(kapi).NotTo(o.ContainSubstring("cloud-provider"))
o.Expect(kapi).NotTo(o.ContainSubstring("cloud-config"))
kcm, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm/config", "-n", "openshift-kube-controller-manager", "-o=jsonpath={.data.config\\.yaml}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(kcm).NotTo(o.ContainSubstring("cloud-provider"))
o.Expect(kcm).NotTo(o.ContainSubstring("cloud-config"))
g.By("Check no `cloud-config` set on kubelet, but `--cloud-provider=external` still set on kubelet")
masterkubelet, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("machineconfig/01-master-kubelet", "-o=jsonpath={.spec.config.systemd.units[1].contents}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(masterkubelet).To(o.ContainSubstring("cloud-provider=external"))
o.Expect(masterkubelet).NotTo(o.ContainSubstring("cloud-config"))
workerkubelet, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("machineconfig/01-worker-kubelet", "-o=jsonpath={.spec.config.systemd.units[1].contents}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(workerkubelet).NotTo(o.ContainSubstring("cloud-config"))
o.Expect(workerkubelet).To(o.ContainSubstring("cloud-provider=external"))
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Low-70682-Trust bundle CA configmap should have ownership annotations", func() {
g.By("Check cluster does not have basecap set as None")
baseCapSet, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("clusterversion", "-o=jsonpath={.items[*].spec.capabilities.baselineCapabilitySet}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if baseCapSet == "None" {
g.Skip("Skip test when ccm co is not available")
}
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm", "ccm-trusted-ca", "-n", "openshift-cloud-controller-manager", "-o=jsonpath={.metadata.annotations}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).To(o.ContainSubstring("Cloud Compute / Cloud Controller Manager"))
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-High-73119-Create Internal LB service on aws/gcp/azure", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP)
ccmBaseDir := exutil.FixturePath("testdata", "clusterinfrastructure", "ccm")
svc := filepath.Join(ccmBaseDir, "svc-loadbalancer-with-annotations.yaml")
lbNamespace := "ns-73119"
defer oc.DeleteSpecifiedNamespaceAsAdmin(lbNamespace)
oc.CreateSpecifiedNamespaceAsAdmin(lbNamespace)
exutil.SetNamespacePrivileged(oc, lbNamespace)
svcForSubnet := loadBalancerServiceDescription{
template: svc,
name: "internal-lb-73119",
namespace: lbNamespace,
}
if iaasPlatform == clusterinfra.AWS {
exutil.By("Get worker private subnetID")
region, err := exutil.GetAWSClusterRegion(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
clusterinfra.GetAwsCredentialFromCluster(oc)
awsClient := exutil.InitAwsSessionWithRegion(region)
machineName := clusterinfra.ListMasterMachineNames(oc)[0]
instanceID, err := awsClient.GetAwsInstanceID(machineName)
o.Expect(err).NotTo(o.HaveOccurred())
vpcID, err := awsClient.GetAwsInstanceVPCId(instanceID)
o.Expect(err).NotTo(o.HaveOccurred())
subnetIds, err := awsClient.GetAwsPrivateSubnetIDs(vpcID)
o.Expect(subnetIds).ShouldNot(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
svcForSubnet.awssubnet = subnetIds[0]
}
if iaasPlatform == clusterinfra.GCP {
svcForSubnet.gcptype = "internal"
}
if iaasPlatform == clusterinfra.Azure {
defaultWorkerMachinesetName := clusterinfra.GetRandomMachineSetName(oc)
subnet, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, defaultWorkerMachinesetName, "-n", "openshift-machine-api", "-o=jsonpath={.spec.template.spec.providerSpec.value.subnet}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
svcForSubnet.azureinternal = true
svcForSubnet.azuresubnet = subnet
}
exutil.By("Create internal loadBalancerService")
defer svcForSubnet.deleteLoadBalancerService(oc)
svcForSubnet.createLoadBalancerService(oc)
g.By("Check External-IP assigned")
getLBSvcIP(oc, svcForSubnet)
exutil.By("Get the Interanl LB ingress ip or hostname")
// AWS, IBMCloud use hostname, other cloud platforms use ip
internalLB, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", lbNamespace, "service", svcForSubnet.name, "-o=jsonpath={.status.loadBalancer.ingress}").Output()
e2e.Logf("the internal LB is %v", internalLB)
if iaasPlatform == clusterinfra.AWS {
o.Expect(internalLB).To(o.MatchRegexp(`"hostname":.*elb.*amazonaws.com`))
} else {
o.Expect(internalLB).To(o.MatchRegexp(`"ip":"10\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}"`))
}
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Medium-70621-cloud-controller-manager should be Upgradeable is True when Degraded is False [Disruptive]", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.GCP, clusterinfra.Azure, clusterinfra.IBMCloud, clusterinfra.Nutanix, clusterinfra.VSphere, clusterinfra.OpenStack)
ccm, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("co", "cloud-controller-manager").Output()
if !strings.Contains(ccm, "cloud-controller-manager") {
g.Skip("This case is not executable when cloud-controller-manager CO is absent")
}
e2e.Logf("Delete cm to make co cloud-controller-manager Degraded=True")
cloudProviderConfigCMFile, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "cloud-provider-config", "-n", "openshift-config", "-oyaml").OutputToFile("70621-cloud-provider-config-cm.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "cloud-provider-config", "-n", "openshift-config").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
os.Remove(cloudProviderConfigCMFile)
}()
defer func() {
e2e.Logf("Recreate the deleted cm to recover cluster, cm kube-cloud-config can be recreated by cluster")
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", cloudProviderConfigCMFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
state, checkClusterOperatorConditionErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("co", "cloud-controller-manager", "-o", "jsonpath={.status.conditions[?(@.type==\"Degraded\")].status}{.status.conditions[?(@.type==\"Upgradeable\")].status}").Output()
o.Expect(checkClusterOperatorConditionErr).NotTo(o.HaveOccurred())
o.Expect(state).To(o.ContainSubstring("FalseTrue"))
}()
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "kube-cloud-config", "-n", "openshift-config-managed").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Co cloud-controller-manager Degraded=True, Upgradeable=false")
state, checkClusterOperatorConditionErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("co", "cloud-controller-manager", "-o", "jsonpath={.status.conditions[?(@.type==\"Degraded\")].status}{.status.conditions[?(@.type==\"Upgradeable\")].status}").Output()
o.Expect(checkClusterOperatorConditionErr).NotTo(o.HaveOccurred())
o.Expect(state).To(o.ContainSubstring("TrueFalse"))
})
// author: [email protected]
g.It("Author:miyadav-NonHyperShiftHOST-Medium-63778-cloud-controller-manager should be Upgradeable is True on None clusters", func() {
exutil.SkipIfPlatformTypeNot(oc, "None")
g.By("Check Upgradeable status is True")
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperator", "cloud-controller-manager", `-o=jsonpath={.status.conditions[?(@.type=="Upgradeable")].status}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Compare(status, "True") != 0 {
e2e.Failf("Upgradeable status is not True")
}
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Medium-69871-Cloud Controller Manager Operator metrics should only be available via https", func() {
g.By("Check cluster does not have basecap set as None")
baseCapSet, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("clusterversion", "-o=jsonpath={.items[*].spec.capabilities.baselineCapabilitySet}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if baseCapSet == "None" {
g.Skip("Skip test when ccm co is not available")
}
podName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[0].metadata.name}", "-l", "k8s-app=cloud-manager-operator", "-n", "openshift-cloud-controller-manager-operator").Output()
o.Expect(err).NotTo(o.HaveOccurred())
url_http := "http://127.0.0.0:9257/metrics"
url_https := "https://127.0.0.0:9258/metrics"
curlOutputHttp, _ := oc.AsAdmin().WithoutNamespace().Run("exec").Args(podName, "-n", "openshift-cloud-controller-manager-operator", "-i", "--", "curl", url_http).Output()
o.Expect(curlOutputHttp).To(o.ContainSubstring("Connection refused"))
curlOutputHttps, _ := oc.AsAdmin().WithoutNamespace().Run("exec").Args(podName, "-n", "openshift-cloud-controller-manager-operator", "-i", "--", "curl", url_https).Output()
o.Expect(curlOutputHttps).To(o.ContainSubstring("SSL certificate problem"))
})
// author: [email protected]
g.It("Author:miyadav-Low-70124-system:openshift:kube-controller-manager:gce-cloud-provider referencing non existing serviceAccount", func() {
_, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterrolebinding", "system:openshift:kube-controller-manager:gce-cloud-provider").Output()
o.Expect(err).To(o.HaveOccurred())
platformType := clusterinfra.CheckPlatform(oc)
if platformType == clusterinfra.GCP {
sa, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sa", "cloud-provider", "-n", "kube-system").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(sa, "cloud-provider")).To(o.BeTrue())
} else {
_, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sa", "cloud-provider", "-n", "kube-system").Output()
o.Expect(err).To(o.HaveOccurred())
}
})
// author: [email protected]
g.It("Author:miyadav-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-70566-Garbage in cloud-controller-manager status [Disruptive]", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP, clusterinfra.AlibabaCloud, clusterinfra.VSphere, clusterinfra.IBMCloud)
g.By("Delete the namespace openshift-cloud-controller-manager")
msg, err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("project", "openshift-cloud-controller-manager").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(msg).To(o.ContainSubstring("project.project.openshift.io \"openshift-cloud-controller-manager\" deleted"))
defer func() {
err = wait.Poll(60*time.Second, 1200*time.Second, func() (bool, error) {
g.By("Check co cloud-controller-manager is back")
state, checkCloudControllerManagerErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("co", "cloud-controller-manager", "-o", "jsonpath={.status.conditions}").Output()
if checkCloudControllerManagerErr != nil {
e2e.Logf("try next because of err %v", checkCloudControllerManagerErr)
return false, nil
}
if strings.Contains(state, "Trusted CA Bundle Controller works as expected") {
e2e.Logf("Co is back now")
return true, nil
}
e2e.Logf("Still waiting up to 1 minute ...")
return false, nil
})
exutil.AssertWaitPollNoErr(err, "co is not recovered")
}()
g.By("Check co cloud-controller-manager error message")
state, checkCloudControllerManagerErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("co", "cloud-controller-manager", "-o", "jsonpath={.status.conditions}").Output()
o.Expect(checkCloudControllerManagerErr).NotTo(o.HaveOccurred())
o.Expect(state).To(o.ContainSubstring("TrustedCABundleControllerControllerDegraded condition is set to True"))
})
})
| package clusterinfrastructure | ||||
test case | openshift/openshift-tests-private | 214b37a9-b914-4c02-8ecb-ca2bb2dc8dda | Author:zhsun-NonHyperShiftHOST-High-42927-CCM should honour cluster wide proxy settings | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm.go | g.It("Author:zhsun-NonHyperShiftHOST-High-42927-CCM should honour cluster wide proxy settings", func() {
g.By("Check if it's a proxy cluster")
httpProxy, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("proxy/cluster", "-o=jsonpath={.spec.httpProxy}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
httpsProxy, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("proxy/cluster", "-o=jsonpath={.spec.httpsProxy}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if len(httpProxy) == 0 && len(httpsProxy) == 0 {
g.Skip("Skip for non-proxy cluster!")
}
g.By("Check if cloud-controller-manager is deployed")
ccm, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deploy", "-n", "openshift-cloud-controller-manager", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if len(ccm) == 0 {
g.Skip("Skip for cloud-controller-manager is not deployed!")
}
g.By("Check the proxy info for the cloud-controller-manager deployment")
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deploy", ccm, "-n", "openshift-cloud-controller-manager", "-o=jsonpath={.spec.template.spec.containers[0].env}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).To(o.ContainSubstring("\"name\":\"HTTP_PROXY\",\"value\":\"" + httpProxy + "\""))
o.Expect(out).To(o.ContainSubstring("\"name\":\"HTTPS_PROXY\",\"value\":\"" + httpsProxy + "\""))
}) | ||||||
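As a side note on the assertion style above, here is a minimal standalone sketch (not part of the suite) of parsing the container env jsonpath output structurally instead of by substring matching; the envVar type and the sample JSON below are illustrative assumptions shaped like the output the test reads.

// Sketch only: parse the env list the test fetches via jsonpath.
package main

import (
	"encoding/json"
	"fmt"
)

type envVar struct {
	Name  string `json:"name"`
	Value string `json:"value"`
}

func main() {
	// Illustrative sample of `-o=jsonpath={.spec.template.spec.containers[0].env}` output.
	raw := `[{"name":"HTTP_PROXY","value":"http://proxy.example:3128"},{"name":"HTTPS_PROXY","value":"https://proxy.example:3128"}]`
	var envs []envVar
	if err := json.Unmarshal([]byte(raw), &envs); err != nil {
		panic(err)
	}
	for _, e := range envs {
		fmt.Printf("%s=%s\n", e.Name, e.Value)
	}
}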
test case | openshift/openshift-tests-private | a9db694e-4f2a-4259-b50c-4fdba4b4a69c | Author:zhsun-NonHyperShiftHOST-High-43307-cloud-controller-manager clusteroperator should be in Available state | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm.go | g.It("Author:zhsun-NonHyperShiftHOST-High-43307-cloud-controller-manager clusteroperator should be in Available state", func() {
g.By("Check cluster does not have basecap set as None")
baseCapSet, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("clusterversion", "-o=jsonpath={.items[*].spec.capabilities.baselineCapabilitySet}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if baseCapSet == "None" {
g.Skip("Skip test when ccm co is not available")
}
state, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperator/cloud-controller-manager", "-o=jsonpath={.status.conditions[?(@.type==\"Available\")].status}{.status.conditions[?(@.type==\"Progressing\")].status}{.status.conditions[?(@.type==\"Degraded\")].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(state).To(o.ContainSubstring("TrueFalseFalse"))
}) | ||||||
test case | openshift/openshift-tests-private | d5769116-5270-4a0b-a564-8c9bd7f8d7c1 | Author:zhsun-NonHyperShiftHOST-Medium-42879-Cloud-config configmap should be copied and kept in sync within the CCCMO namespace [Disruptive] | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm.go | g.It("Author:zhsun-NonHyperShiftHOST-Medium-42879-Cloud-config configmap should be copied and kept in sync within the CCCMO namespace [Disruptive]", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure, clusterinfra.VSphere)
g.By("Check if cloud-config cm is copied to openshift-cloud-controller-manager namespace")
ccmCM, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm", "-n", "openshift-cloud-controller-manager", "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(ccmCM).To(o.ContainSubstring("cloud-conf"))
g.By("Check if the sync is working correctly")
cmBeforePatch, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm/cloud-conf", "-n", "openshift-cloud-controller-manager", "-o=jsonpath={.data.cloud\\.conf}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("cm/cloud-conf", "-n", "openshift-cloud-controller-manager", "-p", `{"data":{"cloud.conf": "invalid"}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
cmAfterPatch, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm/cloud-conf", "-n", "openshift-cloud-controller-manager", "-o=jsonpath={.data.cloud\\.conf}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(cmBeforePatch).Should(o.Equal(cmAfterPatch))
}) | |||||
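A hedged sketch of how the merge patch used above could be built with json.Marshal, so the cloud.conf key (which contains a dot and must be escaped in jsonpath) never needs hand-quoting; the map layout is illustrative only.

// Sketch only: compose the merge-patch body programmatically.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	patch := map[string]interface{}{
		"data": map[string]string{"cloud.conf": "invalid"},
	}
	b, err := json.Marshal(patch)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"data":{"cloud.conf":"invalid"}}
}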
test case | openshift/openshift-tests-private | ae205769-f115-4178-8e96-d478524ec201 | Author:miyadav-NonHyperShiftHOST-Medium-63829-Target workload annotation should be present in deployments of ccm | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm.go | g.It("Author:miyadav-NonHyperShiftHOST-Medium-63829-Target workload annotation should be present in deployments of ccm ", func() {
SkipIfCloudControllerManagerNotDeployed(oc)
checkDeployments := []struct {
namespace string
deployment string
}{
{
namespace: "openshift-controller-manager",
deployment: "controller-manager",
},
{
namespace: "openshift-controller-manager-operator",
deployment: "openshift-controller-manager-operator",
},
}
for _, checkDeployment := range checkDeployments {
g.By("Check target.workload annotation is present in yaml definition of deployment - " + checkDeployment.deployment)
WorkloadAnnotation, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment", checkDeployment.deployment, "-n", checkDeployment.namespace, "-o=jsonpath={.spec.template.metadata.annotations}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(WorkloadAnnotation).To(o.ContainSubstring("\"target.workload.openshift.io/management\":\"{\\\"effect\\\": \\\"PreferredDuringScheduling\\\"}"))
}
}) | ||||||
test case | openshift/openshift-tests-private | 410ed65b-3061-4e1c-bdc2-b1b766fa4d75 | Author:miyadav-NonHyperShiftHOST-Critical-64657-Alibaba clusters are TechPreview and should not be upgradeable | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm.go | g.It("Author:miyadav-NonHyperShiftHOST-Critical-64657-Alibaba clusters are TechPreview and should not be upgradeable", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AlibabaCloud)
SkipIfCloudControllerManagerNotDeployed(oc)
g.By("Check cluster is TechPreview and should not be upgradeable")
msg, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co", "cloud-controller-manager", "-o=jsonpath={.status.conditions[*]}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(msg).To(o.ContainSubstring("Alibaba platform is currently tech preview, upgrades are not allowed"))
}) | |||||
test case | openshift/openshift-tests-private | 0014115a-9c4e-4bdc-ae78-b6794f9b966a | Author:huliu-NonHyperShiftHOST-Medium-70019-Security Group and rules resource should be deleted when deleting a Ingress Controller | ['"path/filepath"', '"strings"', '"time"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm.go | g.It("Author:huliu-NonHyperShiftHOST-Medium-70019-Security Group and rules resource should be deleted when deleting a Ingress Controller", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
// skip on UPI because there is a bug: https://issues.redhat.com/browse/OCPBUGS-8213
clusterinfra.SkipConditionally(oc)
ccmBaseDir := exutil.FixturePath("testdata", "clusterinfrastructure", "ccm")
ingressControllerTemplate := filepath.Join(ccmBaseDir, "ingressController70019.yaml")
ingressController := ingressControllerDescription{
template: ingressControllerTemplate,
name: "test-swtch-lb",
}
g.By("Create ingressController")
defer ingressController.deleteIngressController(oc)
ingressController.createIngressController(oc)
g.By("Get the dns")
var dns string
err := wait.Poll(2*time.Second, 60*time.Second, func() (bool, error) {
dnsfetched, dnsErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("DNSRecord", ingressController.name+"-wildcard", "-n", "openshift-ingress-operator", "-o=jsonpath={.spec.targets[0]}").Output()
if dnsErr != nil {
e2e.Logf("hasn't got the dns ...")
return false, nil
}
dns = dnsfetched
e2e.Logf("got the dns, dns is: %s", dns)
return true, nil
})
exutil.AssertWaitPollNoErr(err, "got the dns failed")
dnskeys := strings.Split(dns, "-")
groupname := "k8s-elb-" + dnskeys[1]
e2e.Logf("groupname: %s", groupname)
g.By("Get the security group id")
clusterinfra.GetAwsCredentialFromCluster(oc)
awsClient := exutil.InitAwsSession()
sg, err := awsClient.GetSecurityGroupByGroupName(groupname)
if sg == nil {
g.Skip("The profile might not have SecurityGrp for router-default")
}
o.Expect(err).NotTo(o.HaveOccurred())
sgId := *sg.GroupId
e2e.Logf("sgId: %s", sgId)
ingressController.deleteIngressController(oc)
g.By("Wait the dns deleted")
err = wait.Poll(2*time.Second, 30*time.Second, func() (bool, error) {
dnsfetched, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("DNSRecord", ingressController.name+"-wildcard", "-n", "openshift-ingress-operator", "-o=jsonpath={.spec.targets[0]}").Output()
if strings.Contains(dnsfetched, "NotFound") {
e2e.Logf("dns has been deleted")
return true, nil
}
e2e.Logf("still can get the dns, dns is: %s", dnsfetched)
return false, nil
})
exutil.AssertWaitPollNoErr(err, "wait the dns delete failed")
g.By("Check the security group has also been deleted")
err = wait.Poll(2*time.Second, 30*time.Second, func() (bool, error) {
sg, err1 := awsClient.GetSecurityGroupByGroupID(sgId)
if err1 != nil {
if strings.Contains(err1.Error(), "InvalidGroup.NotFound") {
e2e.Logf("security group has been deleted")
return true, nil
}
e2e.Logf("error: %s", err1.Error())
return false, nil
}
e2e.Logf("still can get the security group, sgId is: %s", *sg.GroupId)
return false, nil
})
exutil.AssertWaitPollNoErr(err, "wait the security group delete failed")
}) | |||||
test case | openshift/openshift-tests-private | 9a2c9f0d-8f47-4742-886d-b2890cc679c8 | Author:huliu-NonHyperShiftHOST-Medium-70296-AWS should not use external-cloud-volume-plugin post CSI migration | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm.go | g.It("Author:huliu-NonHyperShiftHOST-Medium-70296-AWS should not use external-cloud-volume-plugin post CSI migration", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
cmKubeControllerManager, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm", "config", "-n", "openshift-kube-controller-manager", "-o=yaml").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(cmKubeControllerManager).NotTo(o.ContainSubstring("external-cloud-volume-plugin"))
}) | |||||
test case | openshift/openshift-tests-private | 099031f6-dd47-4224-9798-f2930a22a067 | Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-LEVEL0-Critical-70618-The new created nodes should be added to load balancer [Disruptive][Slow] | ['"fmt"', '"strings"', '"time"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm.go | g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-LEVEL0-Critical-70618-The new created nodes should be added to load balancer [Disruptive][Slow]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP, clusterinfra.IBMCloud, clusterinfra.AlibabaCloud)
var newNodeNames []string
g.By("Create a new machineset")
infrastructureName := clusterinfra.GetInfrastructureName(oc)
machinesetName := infrastructureName + "-70618"
ms := clusterinfra.MachineSetDescription{Name: machinesetName, Replicas: 0}
defer func() {
err := waitForClusterOperatorsReady(oc, "ingress", "console", "authentication")
exutil.AssertWaitPollNoErr(err, "co recovery fails!")
}()
defer func() {
err := waitForPodWithLabelReady(oc, "openshift-ingress", "ingresscontroller.operator.openshift.io/deployment-ingresscontroller=default")
exutil.AssertWaitPollNoErr(err, "pod recovery fails!")
}()
defer clusterinfra.WaitForMachinesDisapper(oc, machinesetName)
defer ms.DeleteMachineSet(oc)
ms.CreateMachineSet(oc)
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(mapiMachineset, machinesetName, "-n", "openshift-machine-api", "-p", `{"spec":{"replicas":2,"template":{"spec":{"taints":null}}}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachinesRunning(oc, 2, machinesetName)
machineNames := clusterinfra.GetMachineNamesFromMachineSet(oc, machinesetName)
newNodeNames = append(newNodeNames, clusterinfra.GetNodeNameFromMachine(oc, machineNames[0]))
newNodeNames = append(newNodeNames, clusterinfra.GetNodeNameFromMachine(oc, machineNames[1]))
newNodeNameStr := newNodeNames[0] + " " + newNodeNames[1]
e2e.Logf("newNodeNames: %s", newNodeNameStr)
for _, value := range newNodeNames {
err := oc.AsAdmin().WithoutNamespace().Run("label").Args("node", value, "testcase=70618").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
err = oc.AsAdmin().WithoutNamespace().Run("annotate").Args("ns", "openshift-ingress", `scheduler.alpha.kubernetes.io/node-selector=testcase=70618`).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("annotate").Args("ns", "openshift-ingress", `scheduler.alpha.kubernetes.io/node-selector-`).Execute()
g.By("Delete router pods and to make new ones running on new workers")
routerPodNameStr, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[*].metadata.name}", "-n", "openshift-ingress").Output()
o.Expect(err).NotTo(o.HaveOccurred())
routerPodNames := strings.Split(routerPodNameStr, " ")
g.By("Delete old router pods")
for _, value := range routerPodNames {
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", value, "-n", "openshift-ingress").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
g.By("Wait old router pods disappear")
for _, value := range routerPodNames {
err = waitForResourceToDisappear(oc, "openshift-ingress", "pod/"+value)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Router %v failed to fully terminate", "pod/"+value))
}
g.By("Wait new router pods ready")
err = waitForPodWithLabelReady(oc, "openshift-ingress", "ingresscontroller.operator.openshift.io/deployment-ingresscontroller=default")
exutil.AssertWaitPollNoErr(err, "new router pod failed to be ready state within allowed time!")
newRouterPodOnNodeStr, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[*].spec.nodeName}", "-n", "openshift-ingress").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("newRouterPodOnNodeStr: %s", newRouterPodOnNodeStr)
newRouterPodOnNodes := strings.Split(newRouterPodOnNodeStr, " ")
g.By("Check new router pods running on new workers")
for _, value := range newRouterPodOnNodes {
o.Expect(strings.Contains(newNodeNameStr, value)).To(o.BeTrue())
}
g.By("Check co ingress console authentication are good")
err = waitForClusterOperatorsReady(oc, "ingress", "console", "authentication")
exutil.AssertWaitPollNoErr(err, "some co failed to be ready state within allowed time!")
}) | |||||
test case | openshift/openshift-tests-private | 4883ae8d-f6b8-4d35-80c0-33b249134fd1 | Author:zhsun-High-70620-Region and zone labels should be available on the nodes | ['"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm.go | g.It("Author:zhsun-High-70620-Region and zone labels should be available on the nodes", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP, clusterinfra.IBMCloud, clusterinfra.OpenStack)
if iaasPlatform == clusterinfra.Azure {
azureStackCloud, azureErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.azure.cloudName}").Output()
o.Expect(azureErr).NotTo(o.HaveOccurred())
if azureStackCloud == "AzureStackCloud" {
g.Skip("Skip for ASH due to we went straight to the CCM for ASH, so won't have the old labels!")
}
}
nodeLabel, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "--show-labels").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(nodeLabel, "failure-domain.beta.kubernetes.io/region") && strings.Contains(nodeLabel, "topology.kubernetes.io/region") && strings.Contains(nodeLabel, "failure-domain.beta.kubernetes.io/zone") && strings.Contains(nodeLabel, "topology.kubernetes.io/zone")).To(o.BeTrue())
}) | |||||
test case | openshift/openshift-tests-private | 8b86389d-d06a-4803-822f-afdc4426df73 | Author:huliu-NonHyperShiftHOST-High-70744-Pull images from ECR repository [Disruptive] | ['"encoding/base64"', '"fmt"', '"os"', '"os/exec"', '"path/filepath"', '"strings"', '"time"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm.go | g.It("Author:huliu-NonHyperShiftHOST-High-70744-Pull images from ECR repository [Disruptive]", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS)
clusterinfra.SkipForAwsOutpostCluster(oc)
region, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.aws.region}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create a ECR repository and get authorization token")
clusterinfra.GetAwsCredentialFromCluster(oc)
infrastructureName := clusterinfra.GetInfrastructureName(oc)
registryName := "ecr-" + infrastructureName
ecrClient := exutil.NewECRClient(region)
repositoryUri, err := ecrClient.CreateContainerRepository(registryName)
if err != nil {
g.Skip("unable to create container registry: " + err.Error())
}
defer func() {
err := ecrClient.DeleteContainerRepository(registryName)
o.Expect(err).NotTo(o.HaveOccurred())
}()
password, _ := ecrClient.GetAuthorizationToken()
o.Expect(password).NotTo(o.BeEmpty())
auth, err := exec.Command("bash", "-c", fmt.Sprintf("echo %s | base64 -d", password)).Output()
if err != nil {
g.Skip("unable to get authorization token: " + err.Error())
}
g.By("Mirror an image to ECR")
tempDataDir, err := extractPullSecret(oc)
defer os.RemoveAll(tempDataDir)
o.Expect(err).NotTo(o.HaveOccurred())
originAuth := filepath.Join(tempDataDir, ".dockerconfigjson")
authFile, err := appendPullSecretAuth(originAuth, strings.Split(repositoryUri, "/")[0], "", string(auth))
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("image").Args("mirror", "quay.io/openshifttest/pause@sha256:e481caec2eb984ce023673a3ec280bf57dea8c0305009e246b019b3eef044f9e", repositoryUri+":latest", "--insecure", "-a", authFile, "--keep-manifest-list=true").Execute()
if err != nil {
g.Skip("unable to mirror image to ECR: " + err.Error())
}
g.By("Add the AmazonEC2ContainerRegistryReadOnly policy to the worker nodes")
roleName := ""
if exutil.IsSNOCluster(oc) {
roleName = infrastructureName + "-master-role"
} else {
roleName = infrastructureName + "-worker-role"
}
policyArn := "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
iamClient := exutil.NewIAMClient()
err = iamClient.AttachRolePolicy(roleName, policyArn)
if err != nil {
g.Skip("unable to attach role policy: " + err.Error())
}
defer iamClient.DetachRolePolicy(roleName, policyArn)
g.By("Create a new project for testing")
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", "hello-ecr70744").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", "hello-ecr70744", "--ignore-not-found", "--force").Execute()
g.By("Create a new app using the image on ECR")
err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("--name=hello-ecr", "--image="+repositoryUri+":latest", "--allow-missing-images", "-n", "hello-ecr70744").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Wait the pod ready")
err = waitForPodWithLabelReady(oc, "hello-ecr70744", "deployment=hello-ecr")
exutil.AssertWaitPollNoErr(err, "the pod failed to be ready state within allowed time!")
}) | |||||
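A hedged alternative to the `bash -c "echo ... | base64 -d"` step above: decoding the ECR authorization token with encoding/base64 avoids the shell dependency. The sample token below is fabricated for illustration; real ECR tokens decode to the form "AWS:<password>".

// Sketch only: decode an ECR-style token without shelling out.
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	token := base64.StdEncoding.EncodeToString([]byte("AWS:example-password")) // fabricated token
	raw, err := base64.StdEncoding.DecodeString(token)
	if err != nil {
		panic(err)
	}
	parts := strings.SplitN(string(raw), ":", 2) // split once on the first colon
	fmt.Println("user:", parts[0])
}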
test case | openshift/openshift-tests-private | fb4633b7-a43e-4edc-8eb2-6e1bcb2f4f36 | Author:zhsun-LEVEL0-Critical-70627-Service of type LoadBalancer can be created successful | ['"path/filepath"', '"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm.go | g.It("Author:zhsun-LEVEL0-Critical-70627-Service of type LoadBalancer can be created successful", func() {
clusterinfra.SkipForAwsOutpostCluster(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP, clusterinfra.IBMCloud, clusterinfra.AlibabaCloud)
if iaasPlatform == clusterinfra.AWS && strings.HasPrefix(getClusterRegion(oc), "us-iso") {
g.Skip("Skipped: There is no public subnet on AWS C2S/SC2S disconnected clusters!")
}
ccmBaseDir := exutil.FixturePath("testdata", "clusterinfrastructure", "ccm")
loadBalancer := filepath.Join(ccmBaseDir, "svc-loadbalancer.yaml")
loadBalancerService := loadBalancerServiceDescription{
template: loadBalancer,
name: "svc-loadbalancer-70627",
namespace: oc.Namespace(),
}
g.By("Create loadBalancerService")
defer loadBalancerService.deleteLoadBalancerService(oc)
loadBalancerService.createLoadBalancerService(oc)
g.By("Check External-IP assigned")
getLBSvcIP(oc, loadBalancerService)
}) | |||||
test case | openshift/openshift-tests-private | 1d48ee3b-b846-473f-b785-0bddcf472d1c | Author:zhsun-NonHyperShiftHOST-High-71492-Create CLB service on aws outposts cluster [Disruptive] | ['"path/filepath"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm.go | g.It("Author:zhsun-NonHyperShiftHOST-High-71492-Create CLB service on aws outposts cluster [Disruptive]", func() {
clusterinfra.SkipForNotAwsOutpostMixedCluster(oc)
exutil.By("1.1Get regular worker public subnetID")
region, err := exutil.GetAWSClusterRegion(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
clusterinfra.GetAwsCredentialFromCluster(oc)
awsClient := exutil.InitAwsSessionWithRegion(region)
clusterID := clusterinfra.GetInfrastructureName(oc)
subnetId, err := awsClient.GetAwsPublicSubnetID(clusterID)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Subnet -->: %s", subnetId)
exutil.By("1.2Create loadBalancerService and pod")
lbNamespace := "ns-71492"
defer oc.DeleteSpecifiedNamespaceAsAdmin(lbNamespace)
oc.CreateSpecifiedNamespaceAsAdmin(lbNamespace)
exutil.SetNamespacePrivileged(oc, lbNamespace)
ccmBaseDir := exutil.FixturePath("testdata", "clusterinfrastructure", "ccm")
svc := filepath.Join(ccmBaseDir, "svc-loadbalancer-with-annotations.yaml")
pod := filepath.Join(ccmBaseDir, "pod.yaml")
svcForSubnet := loadBalancerServiceDescription{
template: svc,
name: "test-subnet-annotation",
awssubnet: subnetId,
namespace: lbNamespace,
}
defer svcForSubnet.deleteLoadBalancerService(oc)
svcForSubnet.createLoadBalancerService(oc)
podForSubnet := podDescription{
template: pod,
name: "test-subnet-annotation",
namespace: lbNamespace,
}
defer podForSubnet.deletePod(oc)
podForSubnet.createPod(oc)
waitForPodWithLabelReady(oc, lbNamespace, "name=test-subnet-annotation")
exutil.By("1.3Check External-IP assigned")
externalIPForSubnet := getLBSvcIP(oc, svcForSubnet)
e2e.Logf("externalIPForSubnet -->: %s", externalIPForSubnet)
exutil.By("1.4Check result,the svc can be accessed")
waitForLoadBalancerReady(oc, externalIPForSubnet)
exutil.By("2.1Add label for one regular node")
regularNodes := clusterinfra.ListNonOutpostWorkerNodes(oc)
defer oc.AsAdmin().WithoutNamespace().Run("label").Args("node", regularNodes[0], "key1-").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("label").Args("node", regularNodes[0], "key1=value1", "--overwrite").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("regularnode -->: %s", regularNodes[0])
exutil.By("2.2Create loadBalancerService and pod")
svcForLabel := loadBalancerServiceDescription{
template: svc,
name: "test-label-annotation",
awssubnet: subnetId,
awslabel: "key1=value1",
namespace: lbNamespace,
}
defer svcForLabel.deleteLoadBalancerService(oc)
svcForLabel.createLoadBalancerService(oc)
podForLabel := podDescription{
template: pod,
name: "test-label-annotation",
namespace: lbNamespace,
}
defer podForLabel.deletePod(oc)
podForLabel.createPod(oc)
waitForPodWithLabelReady(oc, lbNamespace, "name=test-label-annotation")
exutil.By("2.3Check External-IP assigned")
externalIPForLabel := getLBSvcIP(oc, svcForLabel)
e2e.Logf("externalIPForLabel -->: %s", externalIPForLabel)
exutil.By("2.4Check result,the svc can be accessed")
waitForLoadBalancerReady(oc, externalIPForLabel)
}) | |||||
test case | openshift/openshift-tests-private | 596cbae3-0e70-4ba1-b2f8-f0596f2a8f33 | Author:zhsun-NonHyperShiftHOST-High-72119-Pull images from GCR repository should succeed [Disruptive] | ['"time"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm.go | g.It("Author:zhsun-NonHyperShiftHOST-High-72119-Pull images from GCR repository should succeed [Disruptive]", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.GCP)
projectID, err := exutil.GetGcpProjectID(oc)
o.Expect(err).NotTo(o.HaveOccurred())
if projectID != "openshift-qe" {
g.Skip("Skip as no image in projectID" + projectID)
}
g.By("Create a new project for testing")
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", "hello-gcr72119").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", "hello-gcr72119", "--ignore-not-found", "--force").Execute()
g.By("Create a new app using the image on GCR")
err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("--name=hello-gcr", "--image=gcr.io/openshift-qe/hello-gcr:latest", "--allow-missing-images", "-n", "hello-gcr72119").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Wait the pod ready")
err = waitForPodWithLabelReady(oc, "hello-gcr72119", "deployment=hello-gcr")
exutil.AssertWaitPollNoErr(err, "the pod failed to be ready state within allowed time!")
}) | |||||
test case | openshift/openshift-tests-private | 18638d92-d909-4cd9-b1ec-15e1737545f5 | Author:huliu-NonHyperShiftHOST-Medium-70689-CCM pods should restart to react to changes after credentials update [Disruptive] | ['"encoding/base64"', '"fmt"', '"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm.go | g.It("Author:huliu-NonHyperShiftHOST-Medium-70689-CCM pods should restart to react to changes after credentials update [Disruptive]", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.VSphere, clusterinfra.OpenStack)
var secretName, jsonString, patchPath, podLabel string
if iaasPlatform == clusterinfra.VSphere {
secretName = "vsphere-creds"
jsonString = "-o=jsonpath={.data.vcenter\\.devqe\\.ibmc\\.devcluster\\.openshift\\.com\\.password}"
patchPath = `{"data":{"vcenter.devqe.ibmc.devcluster.openshift.com.password": `
podLabel = "infrastructure.openshift.io/cloud-controller-manager=VSphere"
} else {
secretName = "openstack-credentials"
jsonString = "-o=jsonpath={.data.clouds\\.yaml}"
patchPath = `{"data":{"clouds.yaml": `
podLabel = "infrastructure.openshift.io/cloud-controller-manager=OpenStack"
}
currentSecret, err := oc.AsAdmin().WithoutNamespace().NotShowInfo().Run("get").Args("secret", secretName, jsonString, "-n", "kube-system").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if currentSecret == "" {
g.Skip("The password jsonString is not the defined one, skip the case!")
}
ccmPodNameStr, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[*].metadata.name}", "-n", "openshift-cloud-controller-manager").Output()
o.Expect(err).NotTo(o.HaveOccurred())
ccmPodNames := strings.Split(ccmPodNameStr, " ")
defer func() {
err := waitForPodWithLabelReady(oc, "openshift-cloud-controller-manager", podLabel)
exutil.AssertWaitPollNoErr(err, "pod recovery fails!")
}()
defer oc.AsAdmin().WithoutNamespace().NotShowInfo().Run("patch").Args("secret", secretName, "-n", "kube-system", "-p", patchPath+`"`+currentSecret+`"}}`, "--type=merge").Output()
_, err = oc.AsAdmin().WithoutNamespace().NotShowInfo().Run("patch").Args("secret", secretName, "-n", "kube-system", "-p", patchPath+`"`+base64.StdEncoding.EncodeToString([]byte(exutil.GetRandomString()))+`"}}`, "--type=merge").Output()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Wait old ccm pods disappear")
for _, value := range ccmPodNames {
err = waitForResourceToDisappear(oc, "openshift-cloud-controller-manager", "pod/"+value)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("CCM %v failed to fully terminate", "pod/"+value))
}
}) | |||||
test case | openshift/openshift-tests-private | 09b09e92-cca9-4256-bae2-f6372e0b6aa6 | Author:zhsun-NonHyperShiftHOST-High-72120-Pull images from ACR repository should succeed [Disruptive] | ['"time"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm.go | g.It("Author:zhsun-NonHyperShiftHOST-High-72120-Pull images from ACR repository should succeed [Disruptive]", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Azure)
azureCloudName, azureErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.azure.cloudName}").Output()
o.Expect(azureErr).NotTo(o.HaveOccurred())
if azureCloudName == "AzureStackCloud" || azureCloudName == "AzureUSGovernmentCloud" {
g.Skip("Skip for ASH and azure Gov due to we didn't create container registry on them!")
}
if exutil.IsSTSCluster(oc) {
g.Skip("Skip on STS cluster, as MSI not available")
}
exutil.By("Create RoleAssignments for resourcegroup")
infrastructureID := clusterinfra.GetInfrastructureName(oc)
identityName := infrastructureID + "-identity"
resourceGroup, err := exutil.GetAzureCredentialFromCluster(oc)
o.Expect(err).NotTo(o.HaveOccurred())
az, sessErr := exutil.NewAzureSessionFromEnv()
o.Expect(sessErr).NotTo(o.HaveOccurred())
principalId, _ := exutil.GetUserAssignedIdentityPrincipalID(az, resourceGroup, identityName)
roleAssignmentName, scope := "", ""
defer func() {
err := exutil.DeleteRoleAssignments(az, roleAssignmentName, scope)
o.Expect(err).NotTo(o.HaveOccurred())
}()
// AcrPull role ID is 7f951dda-4ed3-4680-a7ca-43fe172d538d; see https://learn.microsoft.com/en-us/azure/role-based-access-control/built-in-roles#containers
roleAssignmentName, scope = exutil.GrantRoleToPrincipalIDByResourceGroup(az, principalId, "os4-common", "7f951dda-4ed3-4680-a7ca-43fe172d538d")
exutil.By("Create a new project for testing")
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", "hello-acr72120").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", "hello-acr72120", "--ignore-not-found", "--force").Execute()
exutil.By("Create a new app using the image on ACR")
err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("--name=hello-acr", "--image=zhsunregistry.azurecr.io/hello-acr:latest", "--allow-missing-images", "-n", "hello-acr72120").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Wait the pod ready")
err = waitForPodWithLabelReady(oc, "hello-acr72120", "deployment=hello-acr")
exutil.AssertWaitPollNoErr(err, "the pod failed to be ready state within allowed time!")
}) | |||||
test case | openshift/openshift-tests-private | 02b775de-72d8-4a6e-b3e8-5f530635cc14 | Author:zhsun-NonHyperShiftHOST-Medium-74047-The cloud-provider and cloud-config flags should be removed from KCM/KAS [Flaky] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm.go | g.It("Author:zhsun-NonHyperShiftHOST-Medium-74047-The cloud-provider and cloud-config flags should be removed from KCM/KAS [Flaky]", func() {
SkipIfCloudControllerManagerNotDeployed(oc)
g.By("Check no `cloud-provider` and `cloud-config` set on KCM and KAS")
kapi, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm/config", "-n", "openshift-kube-apiserver", "-o=jsonpath={.data.config\\.yaml}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(kapi).NotTo(o.ContainSubstring("cloud-provider"))
o.Expect(kapi).NotTo(o.ContainSubstring("cloud-config"))
kcm, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm/config", "-n", "openshift-kube-controller-manager", "-o=jsonpath={.data.config\\.yaml}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(kcm).NotTo(o.ContainSubstring("cloud-provider"))
o.Expect(kcm).NotTo(o.ContainSubstring("cloud-config"))
g.By("Check no `cloud-config` set on kubelet, but `--cloud-provider=external` still set on kubelet")
masterkubelet, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("machineconfig/01-master-kubelet", "-o=jsonpath={.spec.config.systemd.units[1].contents}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(masterkubelet).To(o.ContainSubstring("cloud-provider=external"))
o.Expect(masterkubelet).NotTo(o.ContainSubstring("cloud-config"))
workerkubelet, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("machineconfig/01-worker-kubelet", "-o=jsonpath={.spec.config.systemd.units[1].contents}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(workerkubelet).NotTo(o.ContainSubstring("cloud-config"))
o.Expect(workerkubelet).To(o.ContainSubstring("cloud-provider=external"))
}) | ||||||
test case | openshift/openshift-tests-private | 664c77e6-46d1-407f-8511-f9888ab8f245 | Author:zhsun-NonHyperShiftHOST-Low-70682-Trust bundle CA configmap should have ownership annotations | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm.go | g.It("Author:zhsun-NonHyperShiftHOST-Low-70682-Trust bundle CA configmap should have ownership annotations", func() {
g.By("Check cluster does not have basecap set as None")
baseCapSet, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("clusterversion", "-o=jsonpath={.items[*].spec.capabilities.baselineCapabilitySet}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if baseCapSet == "None" {
g.Skip("Skip test when ccm co is not available")
}
out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm", "ccm-trusted-ca", "-n", "openshift-cloud-controller-manager", "-o=jsonpath={.metadata.annotations}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).To(o.ContainSubstring("Cloud Compute / Cloud Controller Manager"))
}) | ||||||
test case | openshift/openshift-tests-private | e0ebcba1-d2e2-42c7-9caf-fe20e974771e | Author:zhsun-NonHyperShiftHOST-High-73119-Create Internal LB service on aws/gcp/azure | ['"path/filepath"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm.go | g.It("Author:zhsun-NonHyperShiftHOST-High-73119-Create Internal LB service on aws/gcp/azure", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP)
ccmBaseDir := exutil.FixturePath("testdata", "clusterinfrastructure", "ccm")
svc := filepath.Join(ccmBaseDir, "svc-loadbalancer-with-annotations.yaml")
lbNamespace := "ns-73119"
defer oc.DeleteSpecifiedNamespaceAsAdmin(lbNamespace)
oc.CreateSpecifiedNamespaceAsAdmin(lbNamespace)
exutil.SetNamespacePrivileged(oc, lbNamespace)
svcForSubnet := loadBalancerServiceDescription{
template: svc,
name: "internal-lb-73119",
namespace: lbNamespace,
}
if iaasPlatform == clusterinfra.AWS {
exutil.By("Get worker private subnetID")
region, err := exutil.GetAWSClusterRegion(oc)
o.Expect(err).ShouldNot(o.HaveOccurred())
clusterinfra.GetAwsCredentialFromCluster(oc)
awsClient := exutil.InitAwsSessionWithRegion(region)
machineName := clusterinfra.ListMasterMachineNames(oc)[0]
instanceID, err := awsClient.GetAwsInstanceID(machineName)
o.Expect(err).NotTo(o.HaveOccurred())
vpcID, err := awsClient.GetAwsInstanceVPCId(instanceID)
o.Expect(err).NotTo(o.HaveOccurred())
subnetIds, err := awsClient.GetAwsPrivateSubnetIDs(vpcID)
o.Expect(subnetIds).ShouldNot(o.BeEmpty())
o.Expect(err).NotTo(o.HaveOccurred())
svcForSubnet.awssubnet = subnetIds[0]
}
if iaasPlatform == clusterinfra.GCP {
svcForSubnet.gcptype = "internal"
}
if iaasPlatform == clusterinfra.Azure {
defaultWorkerMachinesetName := clusterinfra.GetRandomMachineSetName(oc)
subnet, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachineset, defaultWorkerMachinesetName, "-n", "openshift-machine-api", "-o=jsonpath={.spec.template.spec.providerSpec.value.subnet}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
svcForSubnet.azureinternal = true
svcForSubnet.azuresubnet = subnet
}
exutil.By("Create internal loadBalancerService")
defer svcForSubnet.deleteLoadBalancerService(oc)
svcForSubnet.createLoadBalancerService(oc)
g.By("Check External-IP assigned")
getLBSvcIP(oc, svcForSubnet)
exutil.By("Get the Interanl LB ingress ip or hostname")
// AWS, IBMCloud use hostname, other cloud platforms use ip
internalLB, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", lbNamespace, "service", svcForSubnet.name, "-o=jsonpath={.status.loadBalancer.ingress}").Output()
e2e.Logf("the internal LB is %v", internalLB)
if iaasPlatform == clusterinfra.AWS {
o.Expect(internalLB).To(o.MatchRegexp(`"hostname":.*elb.*amazonaws.com`))
} else {
o.Expect(internalLB).To(o.MatchRegexp(`"ip":"10\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}"`))
}
}) | |||||
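A standalone sketch exercising the ingress-shape assertions above; the two regular expressions mirror the test, while the sample JSON strings are made-up examples of the loadBalancer ingress status.

// Sketch only: validate ingress shape with the test's regexes.
package main

import (
	"fmt"
	"regexp"
)

func main() {
	hostnameRe := regexp.MustCompile(`"hostname":.*elb.*amazonaws.com`)
	internalIPRe := regexp.MustCompile(`"ip":"10\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}"`)
	// Both sample payloads below are fabricated for illustration.
	fmt.Println(hostnameRe.MatchString(`[{"hostname":"internal-abc.elb.us-east-1.amazonaws.com"}]`)) // true
	fmt.Println(internalIPRe.MatchString(`[{"ip":"10.0.128.4"}]`))                                    // true
}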
test case | openshift/openshift-tests-private | 52ce6be6-5fd1-45e4-833f-0fec1fdd7731 | Author:zhsun-NonHyperShiftHOST-Medium-70621-cloud-controller-manager should be Upgradeable is True when Degraded is False [Disruptive] | ['"os"', '"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm.go | g.It("Author:zhsun-NonHyperShiftHOST-Medium-70621-cloud-controller-manager should be Upgradeable is True when Degraded is False [Disruptive]", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.GCP, clusterinfra.Azure, clusterinfra.IBMCloud, clusterinfra.Nutanix, clusterinfra.VSphere, clusterinfra.OpenStack)
ccm, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("co", "cloud-controller-manager").Output()
if !strings.Contains(ccm, "cloud-controller-manager") {
g.Skip("This case is not executable when cloud-controller-manager CO is absent")
}
e2e.Logf("Delete cm to make co cloud-controller-manager Degraded=True")
cloudProviderConfigCMFile, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "cloud-provider-config", "-n", "openshift-config", "-oyaml").OutputToFile("70621-cloud-provider-config-cm.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "cloud-provider-config", "-n", "openshift-config").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
os.Remove(cloudProviderConfigCMFile)
}()
defer func() {
e2e.Logf("Recreate the deleted cm to recover cluster, cm kube-cloud-config can be recreated by cluster")
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", cloudProviderConfigCMFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
state, checkClusterOperatorConditionErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("co", "cloud-controller-manager", "-o", "jsonpath={.status.conditions[?(@.type==\"Degraded\")].status}{.status.conditions[?(@.type==\"Upgradeable\")].status}").Output()
o.Expect(checkClusterOperatorConditionErr).NotTo(o.HaveOccurred())
o.Expect(state).To(o.ContainSubstring("FalseTrue"))
}()
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("configmap", "kube-cloud-config", "-n", "openshift-config-managed").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Co cloud-controller-manager Degraded=True, Upgradeable=false")
state, checkClusterOperatorConditionErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("co", "cloud-controller-manager", "-o", "jsonpath={.status.conditions[?(@.type==\"Degraded\")].status}{.status.conditions[?(@.type==\"Upgradeable\")].status}").Output()
o.Expect(checkClusterOperatorConditionErr).NotTo(o.HaveOccurred())
o.Expect(state).To(o.ContainSubstring("TrueFalse"))
}) | |||||
test case | openshift/openshift-tests-private | a0a21d6a-a24d-4d13-b289-f00739473f06 | Author:miyadav-NonHyperShiftHOST-Medium-63778-cloud-controller-manager should be Upgradeable is True on None clusters | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm.go | g.It("Author:miyadav-NonHyperShiftHOST-Medium-63778-cloud-controller-manager should be Upgradeable is True on None clusters", func() {
exutil.SkipIfPlatformTypeNot(oc, "None")
g.By("Check Upgradeable status is True")
status, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperator", "cloud-controller-manager", `-o=jsonpath={.status.conditions[?(@.type=="Upgradeable")].status}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Compare(status, "True") != 0 {
e2e.Failf("Upgradeable status is not True")
}
}) | |||||
test case | openshift/openshift-tests-private | 8cc988cc-a25a-42c4-bb73-9cdff9402294 | Author:zhsun-NonHyperShiftHOST-Medium-69871-Cloud Controller Manager Operator metrics should only be available via https | ['"os/exec"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm.go | g.It("Author:zhsun-NonHyperShiftHOST-Medium-69871-Cloud Controller Manager Operator metrics should only be available via https", func() {
g.By("Check cluster does not have basecap set as None")
baseCapSet, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("clusterversion", "-o=jsonpath={.items[*].spec.capabilities.baselineCapabilitySet}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if baseCapSet == "None" {
g.Skip("Skip test when ccm co is not available")
}
podName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items[0].metadata.name}", "-l", "k8s-app=cloud-manager-operator", "-n", "openshift-cloud-controller-manager-operator").Output()
o.Expect(err).NotTo(o.HaveOccurred())
url_http := "http://127.0.0.0:9257/metrics"
url_https := "https://127.0.0.0:9258/metrics"
curlOutputHttp, _ := oc.AsAdmin().WithoutNamespace().Run("exec").Args(podName, "-n", "openshift-cloud-controller-manager-operator", "-i", "--", "curl", url_http).Output()
o.Expect(curlOutputHttp).To(o.ContainSubstring("Connection refused"))
curlOutputHttps, _ := oc.AsAdmin().WithoutNamespace().Run("exec").Args(podName, "-n", "openshift-cloud-controller-manager-operator", "-i", "--", "curl", url_https).Output()
o.Expect(curlOutputHttps).To(o.ContainSubstring("SSL certificate problem"))
}) | |||||
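A minimal sketch of the plain-HTTP probe idea above, run against the local host instead of the operator pod; the port is an arbitrary value assumed to have no listener, mirroring the expected connection refusal.

// Sketch only: a plain-HTTP probe that should fail to connect.
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{Timeout: time.Second}
	// Assumption: nothing listens on this local port, so the GET is refused.
	_, err := client.Get("http://127.0.0.1:59257/metrics")
	fmt.Println("plain http refused:", err != nil)
}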
test case | openshift/openshift-tests-private | d6ada805-0684-44c2-b070-65568f7cdca0 | Author:miyadav-Low-70124-system:openshift:kube-controller-manager:gce-cloud-provider referencing non existing serviceAccount | ['"strings"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm.go | g.It("Author:miyadav-Low-70124-system:openshift:kube-controller-manager:gce-cloud-provider referencing non existing serviceAccount", func() {
_, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterrolebinding", "system:openshift:kube-controller-manager:gce-cloud-provider").Output()
o.Expect(err).To(o.HaveOccurred())
platformType := clusterinfra.CheckPlatform(oc)
if platformType == clusterinfra.GCP {
sa, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sa", "cloud-provider", "-n", "kube-system").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(sa, "cloud-provider")).To(o.BeTrue())
} else {
_, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sa", "cloud-provider", "-n", "kube-system").Output()
o.Expect(err).To(o.HaveOccurred())
}
}) | |||||
test case | openshift/openshift-tests-private | 8c36407c-f639-46b9-9856-f8529dbd39e0 | Author:miyadav-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-70566-Garbage in cloud-controller-manager status [Disruptive] | ['"strings"', '"time"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/ccm.go | g.It("Author:miyadav-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-70566-Garbage in cloud-controller-manager status [Disruptive]", func() {
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP, clusterinfra.AlibabaCloud, clusterinfra.VSphere, clusterinfra.IBMCloud)
g.By("Delete the namespace openshift-cloud-controller-manager")
msg, err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("project", "openshift-cloud-controller-manager").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(msg).To(o.ContainSubstring("project.project.openshift.io \"openshift-cloud-controller-manager\" deleted"))
defer func() {
err = wait.Poll(60*time.Second, 1200*time.Second, func() (bool, error) {
g.By("Check co cloud-controller-manager is back")
state, checkCloudControllerManagerErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("co", "cloud-controller-manager", "-o", "jsonpath={.status.conditions}").Output()
if checkCloudControllerManagerErr != nil {
e2e.Logf("try next because of err %v", checkCloudControllerManagerErr)
return false, nil
}
if strings.Contains(state, "Trusted CA Bundle Controller works as expected") {
e2e.Logf("Co is back now")
return true, nil
}
e2e.Logf("Still waiting up to 1 minute ...")
return false, nil
})
exutil.AssertWaitPollNoErr(err, "co is not recovered")
}()
g.By("Check co cloud-controller-manager error message")
state, checkCloudControllerManagerErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("co", "cloud-controller-manager", "-o", "jsonpath={.status.conditions}").Output()
o.Expect(checkCloudControllerManagerErr).NotTo(o.HaveOccurred())
o.Expect(state).To(o.ContainSubstring("TrustedCABundleControllerControllerDegraded condition is set to True"))
}) | |||||
test | openshift/openshift-tests-private | 2badfe2f-92a6-45b2-b258-70bb85e522da | control_plane_machineset | import (
"fmt"
"math/rand"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/openshift/openshift-tests-private/test/extended/util/architecture"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/control_plane_machineset.go | package clusterinfrastructure
import (
"fmt"
"math/rand"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/openshift/openshift-tests-private/test/extended/util/architecture"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
const (
changeInstanceTypeCon = "changeInstanceType"
backupInstanceTypeCon = "backupInstanceType"
getInstanceTypeJSONCon = "getInstanceTypeJSON"
patchInstanceTypePrefixCon = "patchInstanceTypePrefix"
patchInstanceTypeSuffixCon = "patchInstanceTypeSuffix"
getMachineAvailabilityZoneJSONCon = "getMachineAvailabilityZoneJSON"
getCPMSAvailabilityZonesJSONCon = "getCPMSAvailabilityZonesJSON"
updateFieldsCon = "updateFields"
recoverFieldsCon = "recoverFields"
getSpecificFieldJSONCon = "getSpecificFieldJSON"
patchSpecificFieldPrefixCon = "patchSpecificFieldPrefix"
patchSpecificFieldSuffixCon = "patchSpecificFieldSuffix"
getMachineFieldValueJSONCon = "getMachineFieldValueJSON"
changeSpecificFieldCon = "changeSpecificField"
backupSpecificFieldCon = "backupSpecificField"
customMasterMachineNamePrefix = "custom.master.name-78772"
customMasterMachineNamePrefixGCP = "custom-master-name-78772"
)
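// The keys above index the per-cloud lookup maps declared inside the Describe block below, keeping the platform-specific JSONPath getters, patch fragments and replacement values in one place.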
var _ = g.Describe("[sig-cluster-lifecycle] Cluster_Infrastructure CPMS MAPI", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("control-plane-machineset", exutil.KubeConfigPath())
iaasPlatform clusterinfra.PlatformType
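// changeToBackupInstanceType maps platform and control-plane architecture to a pair of instance types; tests switch to the "change" type, or fall back to the "backup" type when the control plane already uses the change type.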
changeToBackupInstanceType = map[clusterinfra.PlatformType]map[architecture.Architecture]map[string]string{
clusterinfra.AWS: {architecture.AMD64: {changeInstanceTypeCon: "m5.xlarge", backupInstanceTypeCon: "m6i.xlarge"},
architecture.ARM64: {changeInstanceTypeCon: "m6gd.xlarge", backupInstanceTypeCon: "m6g.xlarge"}},
clusterinfra.Azure: {architecture.AMD64: {changeInstanceTypeCon: "Standard_D4s_v3", backupInstanceTypeCon: "Standard_D8s_v3"},
architecture.ARM64: {changeInstanceTypeCon: "Standard_D4ps_v5", backupInstanceTypeCon: "Standard_D8ps_v5"}},
clusterinfra.GCP: {architecture.AMD64: {changeInstanceTypeCon: "e2-standard-4", backupInstanceTypeCon: "n2-standard-4"},
architecture.ARM64: {changeInstanceTypeCon: "t2a-standard-8", backupInstanceTypeCon: "t2a-standard-4"}},
}
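// getInstanceTypeJsonByCloud holds, per platform, the JSONPath that reads the current instance type from the CPMS and the merge-patch prefix/suffix used to rewrite it (instanceType on AWS, vmSize on Azure, machineType on GCP).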
getInstanceTypeJsonByCloud = map[clusterinfra.PlatformType]map[string]string{
clusterinfra.AWS: {getInstanceTypeJSONCon: "-o=jsonpath={.spec.template.machines_v1beta1_machine_openshift_io.spec.providerSpec.value.instanceType}",
patchInstanceTypePrefixCon: `{"spec":{"template":{"machines_v1beta1_machine_openshift_io":{"spec":{"providerSpec":{"value":{"instanceType":`,
patchInstanceTypeSuffixCon: `}}}}}}}`},
clusterinfra.Azure: {getInstanceTypeJSONCon: "-o=jsonpath={.spec.template.machines_v1beta1_machine_openshift_io.spec.providerSpec.value.vmSize}",
patchInstanceTypePrefixCon: `{"spec":{"template":{"machines_v1beta1_machine_openshift_io":{"spec":{"providerSpec":{"value":{"vmSize":`,
patchInstanceTypeSuffixCon: `}}}}}}}`},
clusterinfra.GCP: {getInstanceTypeJSONCon: "-o=jsonpath={.spec.template.machines_v1beta1_machine_openshift_io.spec.providerSpec.value.machineType}",
patchInstanceTypePrefixCon: `{"spec":{"template":{"machines_v1beta1_machine_openshift_io":{"spec":{"providerSpec":{"value":{"machineType":`,
patchInstanceTypeSuffixCon: `}}}}}}}`},
}
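// getSpecificFieldJsonByCloud covers platforms without an instance-type field: Nutanix tests patch vcpusPerSocket and vSphere tests patch diskGiB, with the replacement values kept in changeToBackupSpecificField below.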
getSpecificFieldJsonByCloud = map[clusterinfra.PlatformType]map[string]string{
clusterinfra.Nutanix: {getSpecificFieldJSONCon: "-o=jsonpath={.spec.template.machines_v1beta1_machine_openshift_io.spec.providerSpec.value.vcpusPerSocket}",
patchSpecificFieldPrefixCon: `{"spec":{"template":{"machines_v1beta1_machine_openshift_io":{"spec":{"providerSpec":{"value":{"vcpusPerSocket":`,
patchSpecificFieldSuffixCon: `}}}}}}}`,
getMachineFieldValueJSONCon: "-o=jsonpath={.spec.providerSpec.value.vcpusPerSocket}"},
clusterinfra.VSphere: {getSpecificFieldJSONCon: "-o=jsonpath={.spec.template.machines_v1beta1_machine_openshift_io.spec.providerSpec.value.diskGiB}",
patchSpecificFieldPrefixCon: `{"spec":{"template":{"machines_v1beta1_machine_openshift_io":{"spec":{"providerSpec":{"value":{"diskGiB":`,
patchSpecificFieldSuffixCon: `}}}}}}}`,
getMachineFieldValueJSONCon: "-o=jsonpath={.spec.providerSpec.value.diskGiB}"},
}
changeToBackupSpecificField = map[clusterinfra.PlatformType]map[string]string{
clusterinfra.Nutanix: {changeSpecificFieldCon: "2", backupSpecificFieldCon: "1"},
clusterinfra.VSphere: {changeSpecificFieldCon: "130", backupSpecificFieldCon: "120"},
}
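// otherUpdateFieldsByCloud lists extra provider-spec fields toggled alongside the instance type; only AWS adds placement-group fields, which are cleared again on recovery.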
otherUpdateFieldsByCloud = map[clusterinfra.PlatformType]map[string]string{
clusterinfra.AWS: {updateFieldsCon: `,"placementGroupPartition":3,"placementGroupName":"pgpartition3"`,
recoverFieldsCon: `,"placementGroupPartition":null,"placementGroupName":null`},
clusterinfra.Azure: {updateFieldsCon: ``,
recoverFieldsCon: ``},
clusterinfra.GCP: {updateFieldsCon: ``,
recoverFieldsCon: ``},
}
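// getAvailabilityZoneJSONByCloud holds the JSONPaths that read a machine's availability zone and the CPMS failure-domain zone list on each zoned platform.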
getAvailabilityZoneJSONByCloud = map[clusterinfra.PlatformType]map[string]string{
clusterinfra.AWS: {getMachineAvailabilityZoneJSONCon: "-o=jsonpath={.spec.providerSpec.value.placement.availabilityZone}",
getCPMSAvailabilityZonesJSONCon: "-o=jsonpath={.spec.template.machines_v1beta1_machine_openshift_io.failureDomains.aws[*].placement.availabilityZone}"},
clusterinfra.Azure: {getMachineAvailabilityZoneJSONCon: "-o=jsonpath={.spec.providerSpec.value.zone}",
getCPMSAvailabilityZonesJSONCon: "-o=jsonpath={.spec.template.machines_v1beta1_machine_openshift_io.failureDomains.azure[*].zone}"},
clusterinfra.GCP: {getMachineAvailabilityZoneJSONCon: "-o=jsonpath={.spec.providerSpec.value.zone}",
getCPMSAvailabilityZonesJSONCon: "-o=jsonpath={.spec.template.machines_v1beta1_machine_openshift_io.failureDomains.gcp[*].zone}"},
}
)
g.BeforeEach(func() {
exutil.SkipForSNOCluster(oc)
iaasPlatform = clusterinfra.CheckPlatform(oc)
})
g.It("Author:zhsun-NonHyperShiftHOST-High-56086-Controlplanemachineset should be created by default", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.GCP, clusterinfra.Azure, clusterinfra.Nutanix, clusterinfra.VSphere)
g.By("CPMS should be created by default and state is Active")
cpmsState, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-n", machineAPINamespace, "-o=jsonpath={.spec.state}").Output()
o.Expect(cpmsState).To(o.ContainSubstring("Active"))
o.Expect(checkIfCPMSIsStable(oc)).To(o.BeTrue())
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Medium-53320-Owner reference could be added/removed to control plan machines [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP, clusterinfra.Nutanix, clusterinfra.VSphere)
g.By("Check ownerReferences is added to master machines")
masterMachineList := clusterinfra.ListMasterMachineNames(oc)
for _, masterMachineName := range masterMachineList {
ownerReferences, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, masterMachineName, "-o=jsonpath={.metadata.ownerReferences}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(ownerReferences).ShouldNot(o.BeEmpty())
}
g.By("Delete controlplanemachineset")
defer printNodeInfo(oc)
defer activeControlPlaneMachineSet(oc)
deleteControlPlaneMachineSet(oc)
g.By("Check ownerReferences is removed from master machines")
err := wait.Poll(2*time.Second, 30*time.Second, func() (bool, error) {
cpmsState, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-n", machineAPINamespace, "-o=jsonpath={.spec.state}").Output()
if cpmsState == "Inactive" {
for _, masterMachineName := range masterMachineList {
ownerReferences, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, masterMachineName, "-o=jsonpath={.metadata.ownerReferences}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(ownerReferences).Should(o.BeEmpty())
}
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "controlplanemachineset is not re-created as Inactive or ownerReferences are not removed")
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Medium-53081-Finalizer should be added to control plan machineset [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP, clusterinfra.Nutanix, clusterinfra.VSphere)
g.By("Check finalizer is added to controlplanemachineset")
finalizers, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.metadata.finalizers[0]}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(finalizers).To(o.ContainSubstring("controlplanemachineset.machine.openshift.io"))
g.By("Remove finalizer")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"metadata":{"finalizers":null}}`, "--type=merge", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Finalizer should be re-added to controlplanemachineset")
finalizers, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.metadata.finalizers[0]}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(finalizers).To(o.ContainSubstring("controlplanemachineset.machine.openshift.io"))
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-High-53610-Operator control-plane-machine-set should be in Available state and report version information", func() {
capability, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion", "version", "-o=jsonpath={.status.capabilities.enabledCapabilities}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if !strings.Contains(capability, "MachineAPI") {
g.Skip("MachineAPI not enabled so co control-plane-machine-set wont be present")
}
state, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperator/control-plane-machine-set", "-o=jsonpath={.status.conditions[?(@.type==\"Available\")].status}{.status.conditions[?(@.type==\"Progressing\")].status}{.status.conditions[?(@.type==\"Degraded\")].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
version, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperator/control-plane-machine-set", "-o=jsonpath={.status.versions[0].version}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(state).To(o.ContainSubstring("TrueFalseFalse"))
o.Expect(version).To(o.ContainSubstring("4."))
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-53323-78772-Implement update logic for RollingUpdate CPMS strategy update instance type [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP)
skipForCPMSNotStable(oc)
skipForClusterNotStable(oc)
controlPlaneArch := architecture.GetControlPlaneArch(oc)
changeInstanceType := changeToBackupInstanceType[iaasPlatform][controlPlaneArch][changeInstanceTypeCon]
backupInstanceType := changeToBackupInstanceType[iaasPlatform][controlPlaneArch][backupInstanceTypeCon]
if iaasPlatform == clusterinfra.GCP && controlPlaneArch == architecture.AMD64 {
confidentialCompute, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.spec.template.machines_v1beta1_machine_openshift_io.spec.providerSpec.value.confidentialCompute}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if confidentialCompute == "Enabled" {
changeInstanceType = "c2d-standard-4"
backupInstanceType = "n2d-standard-4"
}
}
g.By("Get current instanceType")
currentInstanceType, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", getInstanceTypeJsonByCloud[iaasPlatform][getInstanceTypeJSONCon], "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("currentInstanceType:%s", currentInstanceType)
if currentInstanceType == changeInstanceType {
changeInstanceType = backupInstanceType
}
labelsAfter := "machine.openshift.io/instance-type=" + changeInstanceType + ",machine.openshift.io/cluster-api-machine-type=master"
labelsBefore := "machine.openshift.io/instance-type=" + currentInstanceType + ",machine.openshift.io/cluster-api-machine-type=master"
g.By("Check if any other fields need to be updated")
otherUpdateFields := otherUpdateFieldsByCloud[iaasPlatform][updateFieldsCon]
otherRecoverFields := otherUpdateFieldsByCloud[iaasPlatform][recoverFieldsCon]
if iaasPlatform == clusterinfra.AWS {
clusterinfra.GetAwsCredentialFromCluster(oc)
awsClient := exutil.InitAwsSession()
_, err := awsClient.GetPlacementGroupByName("pgpartition3")
if err != nil {
otherUpdateFields = ``
otherRecoverFields = ``
}
}
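// Assemble the JSON merge patches: patchstrChange applies the new instance type (plus any extra fields), patchstrRecover restores the original value on cleanup.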
patchstrChange := getInstanceTypeJsonByCloud[iaasPlatform][patchInstanceTypePrefixCon] + `"` + changeInstanceType + `"` + otherUpdateFields + getInstanceTypeJsonByCloud[iaasPlatform][patchInstanceTypeSuffixCon]
patchstrRecover := getInstanceTypeJsonByCloud[iaasPlatform][patchInstanceTypePrefixCon] + `"` + currentInstanceType + `"` + otherRecoverFields + getInstanceTypeJsonByCloud[iaasPlatform][patchInstanceTypeSuffixCon]
g.By("Change instanceType to trigger RollingUpdate")
defer printNodeInfo(oc)
defer waitMasterNodeReady(oc)
defer waitForClusterStable(oc)
defer waitForCPMSUpdateCompleted(oc, 1)
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", patchstrRecover, "--type=merge", "-n", machineAPINamespace).Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"machineNamePrefix":null}}`, "--type=merge", "-n", machineAPINamespace).Execute()
customMachineName := customMasterMachineNamePrefix
if iaasPlatform == clusterinfra.GCP {
customMachineName = customMasterMachineNamePrefixGCP
}
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"machineNamePrefix":"`+customMachineName+`"}}`, "--type=merge", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", patchstrChange, "--type=merge", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
/*
The RollingUpdate strategy replaces the master machines one by one.
To save time, we only verify that the first machine updates successfully
and consider the case passed; every machine follows the same update path,
so checking one is a reasonable proxy for the rest.
*/
updatedMachineName := clusterinfra.WaitForMachinesRunningByLabel(oc, 1, labelsAfter)[0]
e2e.Logf("updatedMachineName:%s", updatedMachineName)
if exutil.IsTechPreviewNoUpgrade(oc) {
o.Expect(updatedMachineName).To(o.HavePrefix(customMachineName))
}
suffix := getMachineSuffix(oc, updatedMachineName)
e2e.Logf("suffix:%s", suffix)
clusterinfra.WaitForMachineDisappearBySuffix(oc, suffix, labelsBefore)
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-53323-78772-Implement update logic for RollingUpdate CPMS strategy update some field [Disruptive]", func() {
//For providers that don't have an instance-type field, we update some other field to trigger the update:
//For Nutanix, we choose vcpusPerSocket
//For vSphere, we choose diskGiB
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Nutanix, clusterinfra.VSphere)
skipForCPMSNotStable(oc)
skipForClusterNotStable(oc)
var changeFieldValue, backupFieldValue, getFieldValueJSON string
var patchstrPrefix, patchstrSuffix string
changeFieldValue = changeToBackupSpecificField[iaasPlatform][changeSpecificFieldCon]
backupFieldValue = changeToBackupSpecificField[iaasPlatform][backupSpecificFieldCon]
getFieldValueJSON = getSpecificFieldJsonByCloud[iaasPlatform][getSpecificFieldJSONCon]
patchstrPrefix = getSpecificFieldJsonByCloud[iaasPlatform][patchSpecificFieldPrefixCon]
patchstrSuffix = getSpecificFieldJsonByCloud[iaasPlatform][patchSpecificFieldSuffixCon]
g.By("Get current field value")
currentFieldValue, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", getFieldValueJSON, "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("currentFieldValue:%s", currentFieldValue)
if currentFieldValue == changeFieldValue {
changeFieldValue = backupFieldValue
}
getMachineFieldValueJSON := getSpecificFieldJsonByCloud[iaasPlatform][getMachineFieldValueJSONCon]
patchstrChange := patchstrPrefix + changeFieldValue + patchstrSuffix
patchstrRecover := patchstrPrefix + currentFieldValue + patchstrSuffix
g.By("Change field value to trigger RollingUpdate")
defer printNodeInfo(oc)
defer waitMasterNodeReady(oc)
defer waitForClusterStable(oc)
defer waitForCPMSUpdateCompleted(oc, 1)
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", patchstrRecover, "--type=merge", "-n", machineAPINamespace).Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"machineNamePrefix":null}}`, "--type=merge", "-n", machineAPINamespace).Execute()
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"machineNamePrefix":"`+customMasterMachineNamePrefix+`"}}`, "--type=merge", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", patchstrChange, "--type=merge", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
labelMaster := "machine.openshift.io/cluster-api-machine-type=master"
updatedMachineName := clusterinfra.WaitForMachineRunningByField(oc, getMachineFieldValueJSON, changeFieldValue, labelMaster)
e2e.Logf("updatedMachineName:%s", updatedMachineName)
if exutil.IsTechPreviewNoUpgrade(oc) {
o.Expect(updatedMachineName).To(o.HavePrefix(customMasterMachineNamePrefix))
}
suffix := getMachineSuffix(oc, updatedMachineName)
e2e.Logf("suffix:%s", suffix)
clusterinfra.WaitForMachineDisappearBySuffixAndField(oc, suffix, getMachineFieldValueJSON, currentFieldValue, labelMaster)
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-55631-Implement update logic for RollingUpdate CPMS strategy - Delete a master machine [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP, clusterinfra.Nutanix, clusterinfra.VSphere)
skipForCPMSNotStable(oc)
skipForClusterNotStable(oc)
g.By("Random pick a master machine")
machineName := clusterinfra.ListMasterMachineNames(oc)[rand.Int31n(int32(len(clusterinfra.ListMasterMachineNames(oc))))]
suffix := getMachineSuffix(oc, machineName)
var getMachineAvailabilityZoneJSON string
labels := "machine.openshift.io/cluster-api-machine-type=master"
if iaasPlatform == clusterinfra.AWS || iaasPlatform == clusterinfra.Azure || iaasPlatform == clusterinfra.GCP {
getMachineAvailabilityZoneJSON = getAvailabilityZoneJSONByCloud[iaasPlatform][getMachineAvailabilityZoneJSONCon]
availabilityZone, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, machineName, "-n", "openshift-machine-api", getMachineAvailabilityZoneJSON).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if availabilityZone != "" {
labels = "machine.openshift.io/zone=" + availabilityZone + ",machine.openshift.io/cluster-api-machine-type=master"
}
}
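// The replacement machine keeps the same name suffix and, on zoned platforms, comes up in the same availability zone, so both are encoded in the wait conditions below.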
g.By("Delete the master machine to trigger RollingUpdate")
defer printNodeInfo(oc)
defer waitMasterNodeReady(oc)
defer waitForClusterStable(oc)
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args(mapiMachine, machineName, "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachineRunningBySuffix(oc, suffix, labels)
clusterinfra.WaitForMachineDisappearByName(oc, machineName)
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-54005-78772-Control plane machine set OnDelete update strategies - update instance type [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP)
skipForCPMSNotStable(oc)
skipForClusterNotStable(oc)
controlPlaneArch := architecture.GetControlPlaneArch(oc)
changeInstanceType := changeToBackupInstanceType[iaasPlatform][controlPlaneArch][changeInstanceTypeCon]
backupInstanceType := changeToBackupInstanceType[iaasPlatform][controlPlaneArch][backupInstanceTypeCon]
if iaasPlatform == clusterinfra.GCP && controlPlaneArch == architecture.AMD64 {
confidentialCompute, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.spec.template.machines_v1beta1_machine_openshift_io.spec.providerSpec.value.confidentialCompute}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if confidentialCompute == "Enabled" {
changeInstanceType = "c2d-standard-4"
backupInstanceType = "n2d-standard-4"
}
}
g.By("Get current instanceType")
currentInstanceType, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", getInstanceTypeJsonByCloud[iaasPlatform][getInstanceTypeJSONCon], "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("currentInstanceType:%s", currentInstanceType)
if currentInstanceType == changeInstanceType {
changeInstanceType = backupInstanceType
}
labelsAfter := "machine.openshift.io/instance-type=" + changeInstanceType + ",machine.openshift.io/cluster-api-machine-type=master"
patchstrChange := getInstanceTypeJsonByCloud[iaasPlatform][patchInstanceTypePrefixCon] + `"` + changeInstanceType + `"` + getInstanceTypeJsonByCloud[iaasPlatform][patchInstanceTypeSuffixCon]
patchstrRecover := getInstanceTypeJsonByCloud[iaasPlatform][patchInstanceTypePrefixCon] + `"` + currentInstanceType + `"` + getInstanceTypeJsonByCloud[iaasPlatform][patchInstanceTypeSuffixCon]
g.By("Update strategy to OnDelete, change instanceType to trigger OnDelete update")
defer printNodeInfo(oc)
defer waitMasterNodeReady(oc)
defer waitForClusterStable(oc)
defer waitForCPMSUpdateCompleted(oc, 1)
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"strategy":{"type":"RollingUpdate"}}}`, "--type=merge", "-n", machineAPINamespace).Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"machineNamePrefix":null}}`, "--type=merge", "-n", machineAPINamespace).Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", patchstrRecover, "--type=merge", "-n", machineAPINamespace).Execute()
defer waitForClusterStable(oc)
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"strategy":{"type":"OnDelete"}}}`, "--type=merge", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
customMachineName := customMasterMachineNamePrefix
if iaasPlatform == clusterinfra.GCP {
customMachineName = customMasterMachineNamePrefixGCP
}
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"machineNamePrefix":"`+customMachineName+`"}}`, "--type=merge", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", patchstrChange, "--type=merge", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Delete one master manually")
toDeletedMachineName := clusterinfra.ListMasterMachineNames(oc)[rand.Int31n(int32(len(clusterinfra.ListMasterMachineNames(oc))))]
clusterinfra.DeleteMachine(oc, toDeletedMachineName)
g.By("Check new master will be created and old master will be deleted")
newCreatedMachineName := clusterinfra.WaitForMachinesRunningByLabel(oc, 1, labelsAfter)[0]
e2e.Logf("newCreatedMachineName:%s", newCreatedMachineName)
if exutil.IsTechPreviewNoUpgrade(oc) {
o.Expect(newCreatedMachineName).To(o.HavePrefix(customMachineName))
}
clusterinfra.WaitForMachineDisappearByName(oc, toDeletedMachineName)
waitForClusterStable(oc)
o.Expect(checkIfCPMSIsStable(oc)).To(o.BeTrue())
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Author:huliu-Medium-54005-78772-Control plane machine set OnDelete update strategies - update some field [Disruptive]", func() {
//For providers that don't have an instance-type field, we update some other field to trigger the update:
//For Nutanix, we choose vcpusPerSocket
//For vSphere, we choose diskGiB
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Nutanix, clusterinfra.VSphere)
skipForCPMSNotStable(oc)
skipForClusterNotStable(oc)
var changeFieldValue, backupFieldValue, getFieldValueJSON string
var patchstrPrefix, patchstrSuffix string
changeFieldValue = changeToBackupSpecificField[iaasPlatform][changeSpecificFieldCon]
backupFieldValue = changeToBackupSpecificField[iaasPlatform][backupSpecificFieldCon]
getFieldValueJSON = getSpecificFieldJsonByCloud[iaasPlatform][getSpecificFieldJSONCon]
patchstrPrefix = getSpecificFieldJsonByCloud[iaasPlatform][patchSpecificFieldPrefixCon]
patchstrSuffix = getSpecificFieldJsonByCloud[iaasPlatform][patchSpecificFieldSuffixCon]
g.By("Get current field value")
currentFieldValue, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", getFieldValueJSON, "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("currentFieldValue:%s", currentFieldValue)
if currentFieldValue == changeFieldValue {
changeFieldValue = backupFieldValue
}
getMachineFieldValueJSON := getSpecificFieldJsonByCloud[iaasPlatform][getMachineFieldValueJSONCon]
patchstrChange := patchstrPrefix + changeFieldValue + patchstrSuffix
patchstrRecover := patchstrPrefix + currentFieldValue + patchstrSuffix
g.By("Update strategy to OnDelete, change field value to trigger OnDelete update")
defer printNodeInfo(oc)
defer waitMasterNodeReady(oc)
defer waitForClusterStable(oc)
defer waitForCPMSUpdateCompleted(oc, 1)
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"strategy":{"type":"RollingUpdate"}}}`, "--type=merge", "-n", machineAPINamespace).Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"machineNamePrefix":null}}`, "--type=merge", "-n", machineAPINamespace).Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", patchstrRecover, "--type=merge", "-n", machineAPINamespace).Execute()
defer waitForClusterStable(oc)
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"strategy":{"type":"OnDelete"}}}`, "--type=merge", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"machineNamePrefix":"`+customMasterMachineNamePrefix+`"}}`, "--type=merge", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", patchstrChange, "--type=merge", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Delete one master manually")
toDeletedMachineName := clusterinfra.ListMasterMachineNames(oc)[rand.Int31n(int32(len(clusterinfra.ListMasterMachineNames(oc))))]
clusterinfra.DeleteMachine(oc, toDeletedMachineName)
g.By("Check new master will be created and old master will be deleted")
labelMaster := "machine.openshift.io/cluster-api-machine-type=master"
newCreatedMachineName := clusterinfra.WaitForMachineRunningByField(oc, getMachineFieldValueJSON, changeFieldValue, labelMaster)
e2e.Logf("newCreatedMachineName:%s", newCreatedMachineName)
if exutil.IsTechPreviewNoUpgrade(oc) {
o.Expect(newCreatedMachineName).To(o.HavePrefix(customMasterMachineNamePrefix))
}
clusterinfra.WaitForMachineDisappearByName(oc, toDeletedMachineName)
waitForClusterStable(oc)
o.Expect(checkIfCPMSIsStable(oc)).To(o.BeTrue())
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-55724-Control plane machine set OnDelete update strategies - Delete/Add a failureDomain [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP)
skipForCPMSNotStable(oc)
skipForClusterNotStable(oc)
g.By("Check failureDomains")
availabilityZones := getCPMSAvailabilityZones(oc, iaasPlatform)
if len(availabilityZones) <= 1 {
g.Skip("Skip for the failureDomains is no more than 1")
}
g.By("Update strategy to OnDelete")
key, value, machineName := getZoneAndMachineFromCPMSZones(oc, availabilityZones)
getMachineAvailabilityZoneJSON := getAvailabilityZoneJSONByCloud[iaasPlatform][getMachineAvailabilityZoneJSONCon]
getCPMSAvailabilityZonesJSON := getAvailabilityZoneJSONByCloud[iaasPlatform][getCPMSAvailabilityZonesJSONCon]
deleteFailureDomain, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.spec.template.machines_v1beta1_machine_openshift_io.failureDomains."+iaasPlatform.String()+"["+strconv.Itoa(key)+"]}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
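// Snapshot the failure domain being removed so the deferred cleanup can restore it if the test exits before the rebalance step.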
defer printNodeInfo(oc)
defer waitMasterNodeReady(oc)
defer waitForClusterStable(oc)
defer func() {
availabilityZonesStr, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", getCPMSAvailabilityZonesJSON, "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if !strings.Contains(availabilityZonesStr, value) {
oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `[{"op":"add","path":"/spec/template/machines_v1beta1_machine_openshift_io/failureDomains/`+iaasPlatform.String()+`/0","value":`+deleteFailureDomain+`}]`, "--type=json", "-n", machineAPINamespace).Execute()
waitForCPMSUpdateCompleted(oc, 1)
}
}()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"strategy":{"type":"RollingUpdate"}}}`, "--type=merge", "-n", machineAPINamespace).Execute()
defer waitForClusterStable(oc)
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"strategy":{"type":"OnDelete"}}}`, "--type=merge", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Pick the failureDomain which has only one master machine and delete the failureDomain")
suffix := getMachineSuffix(oc, machineName)
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `[{"op":"remove","path":"/spec/template/machines_v1beta1_machine_openshift_io/failureDomains/`+iaasPlatform.String()+`/`+strconv.Itoa(key)+`"}]`, "--type=json", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Delete the master machine in the selected failureDomain")
clusterinfra.DeleteMachine(oc, machineName)
g.By("Check new master will be created in other zones and old master will be deleted")
labelsBefore := "machine.openshift.io/zone=" + value + ",machine.openshift.io/cluster-api-machine-type=master"
labelsAfter := "machine.openshift.io/zone!=" + value + ",machine.openshift.io/cluster-api-machine-type=master"
newMachineNameRolledWithFailureDomain := clusterinfra.WaitForMachineRunningBySuffix(oc, suffix, labelsAfter)
clusterinfra.WaitForMachineDisappearBySuffix(oc, suffix, labelsBefore)
waitForClusterStable(oc)
g.By("Check if it will rebalance the machines")
availabilityZones = getCPMSAvailabilityZones(oc, iaasPlatform)
if len(availabilityZones) >= 3 {
e2e.Logf("availabilityZones>=3 means the three master machines are in different zones now, it will not rebalance when adding new zone")
oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `[{"op":"add","path":"/spec/template/machines_v1beta1_machine_openshift_io/failureDomains/`+iaasPlatform.String()+`/0","value":`+deleteFailureDomain+`}]`, "--type=json", "-n", machineAPINamespace).Execute()
o.Expect(checkIfCPMSCoIsStable(oc)).To(o.BeTrue())
} else {
g.By("Add the failureDomain back to check OnDelete strategy rebalance the machines")
availabilityZone, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, newMachineNameRolledWithFailureDomain, "-n", "openshift-machine-api", getMachineAvailabilityZoneJSON).Output()
o.Expect(err).NotTo(o.HaveOccurred())
labelsAfter = "machine.openshift.io/zone=" + availabilityZone + ",machine.openshift.io/cluster-api-machine-type=master"
oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `[{"op":"add","path":"/spec/template/machines_v1beta1_machine_openshift_io/failureDomains/`+iaasPlatform.String()+`/0","value":`+deleteFailureDomain+`}]`, "--type=json", "-n", machineAPINamespace).Execute()
g.By("Delete the new created master machine ")
clusterinfra.DeleteMachine(oc, newMachineNameRolledWithFailureDomain)
g.By("Check new master will be created in new added zone and old master will be deleted")
newMachineNameRolledBalancedFailureDomain := clusterinfra.WaitForMachinesRunningByLabel(oc, 1, labelsBefore)[0]
e2e.Logf("updatedMachineName:%s", newMachineNameRolledBalancedFailureDomain)
suffix = getMachineSuffix(oc, newMachineNameRolledBalancedFailureDomain)
clusterinfra.WaitForMachineDisappearBySuffix(oc, suffix, labelsAfter)
waitForClusterStable(oc)
}
o.Expect(checkIfCPMSIsStable(oc)).To(o.BeTrue())
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-55725-Control plane machine set OnDelete update strategies - Delete a master machine [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP, clusterinfra.Nutanix, clusterinfra.VSphere)
skipForCPMSNotStable(oc)
skipForClusterNotStable(oc)
g.By("Update strategy to OnDelete")
defer printNodeInfo(oc)
defer waitMasterNodeReady(oc)
defer waitForClusterStable(oc)
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"strategy":{"type":"RollingUpdate"}}}`, "--type=merge", "-n", machineAPINamespace).Execute()
defer waitForClusterStable(oc)
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"strategy":{"type":"OnDelete"}}}`, "--type=merge", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Random pick a master machine and delete manually to trigger OnDelete update")
toDeletedMachineName := clusterinfra.ListMasterMachineNames(oc)[rand.Int31n(int32(len(clusterinfra.ListMasterMachineNames(oc))))]
var getMachineAvailabilityZoneJSON string
labels := "machine.openshift.io/cluster-api-machine-type=master"
if iaasPlatform == clusterinfra.AWS || iaasPlatform == clusterinfra.Azure || iaasPlatform == clusterinfra.GCP {
getMachineAvailabilityZoneJSON = getAvailabilityZoneJSONByCloud[iaasPlatform][getMachineAvailabilityZoneJSONCon]
availabilityZone, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, toDeletedMachineName, "-n", "openshift-machine-api", getMachineAvailabilityZoneJSON).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if availabilityZone != "" {
labels = "machine.openshift.io/zone=" + availabilityZone + ",machine.openshift.io/cluster-api-machine-type=master"
}
}
clusterinfra.DeleteMachine(oc, toDeletedMachineName)
g.By("Check new master will be created and old master will be deleted")
suffix := getMachineSuffix(oc, toDeletedMachineName)
clusterinfra.WaitForMachineRunningBySuffix(oc, suffix, labels)
clusterinfra.WaitForMachineDisappearByName(oc, toDeletedMachineName)
waitForClusterStable(oc)
o.Expect(checkIfCPMSIsStable(oc)).To(o.BeTrue())
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Medium-53328-It doesn't rearrange the availability zones if the order of the zones isn't matching in the CPMS and the Control Plane [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP)
skipForCPMSNotStable(oc)
g.By("Check failureDomains")
availabilityZones := getCPMSAvailabilityZones(oc, iaasPlatform)
if len(availabilityZones) <= 1 {
g.Skip("Skip for the failureDomains is no more than 1")
}
g.By("Update strategy to OnDelete so that it will not trigger update automaticly")
defer printNodeInfo(oc)
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"strategy":{"type":"RollingUpdate"}}}`, "--type=merge", "-n", machineAPINamespace).Execute()
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"strategy":{"type":"OnDelete"}}}`, "--type=merge", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
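// With OnDelete in place, reordering the failure domains below must not replace any master machine; only the CPMS spec changes.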
g.By("Change the failureDomain's order by deleting/adding failureDomain")
changeFailureDomain, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.spec.template.machines_v1beta1_machine_openshift_io.failureDomains."+iaasPlatform.String()+"[1]}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `[{"op":"remove","path":"/spec/template/machines_v1beta1_machine_openshift_io/failureDomains/`+iaasPlatform.String()+`/1"}]`, "--type=json", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `[{"op":"add","path":"/spec/template/machines_v1beta1_machine_openshift_io/failureDomains/`+iaasPlatform.String()+`/0","value":`+changeFailureDomain+`}]`, "--type=json", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Update strategy to RollingUpdate check if will rearrange the availability zones and no update for masters")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"strategy":{"type":"RollingUpdate"}}}`, "--type=merge", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
newAvailabilityZones := getCPMSAvailabilityZones(oc, iaasPlatform)
o.Expect(strings.Join(newAvailabilityZones, "")).To(o.ContainSubstring(availabilityZones[1] + availabilityZones[0] + strings.Join(availabilityZones[2:], "")))
o.Expect(checkIfCPMSIsStable(oc)).To(o.BeTrue())
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Medium-54895-CPMS generator controller will create a new CPMS if a CPMS is removed from cluster [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP, clusterinfra.Nutanix, clusterinfra.VSphere)
skipForCPMSNotStable(oc)
g.By("Delete controlplanemachineset")
defer printNodeInfo(oc)
defer activeControlPlaneMachineSet(oc)
deleteControlPlaneMachineSet(oc)
g.By("Check a new controlplanemachineset will be created and state is Inactive ")
err := wait.Poll(2*time.Second, 30*time.Second, func() (bool, error) {
cpmsState, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-n", machineAPINamespace, "-o=jsonpath={.spec.state}").Output()
if cpmsState != "Inactive" {
e2e.Logf("controlplanemachineset is not in Inactive state and waiting up to 2 seconds ...")
return false, nil
}
e2e.Logf("controlplanemachineset is in Inactive state")
return true, nil
})
exutil.AssertWaitPollNoErr(err, "controlplanemachineset is not in Inactive state")
g.By("Check controlplanemachineset do not reconcile master machines if state is Inactive")
var fieldName string
var fieldValue = "invalid"
switch iaasPlatform {
case clusterinfra.AWS:
fieldName = "instanceType"
case clusterinfra.Azure:
fieldName = "vmSize"
case clusterinfra.GCP:
fieldName = "machineType"
confidentialCompute, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.spec.template.machines_v1beta1_machine_openshift_io.spec.providerSpec.value.confidentialCompute}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if confidentialCompute == "Enabled" {
fieldValue = "c2d-standard-4"
}
case clusterinfra.Nutanix:
fieldName = "bootType"
fieldValue = "Legacy"
case clusterinfra.VSphere:
fieldName = "diskGiB"
fieldValue = strconv.Itoa(140)
default:
e2e.Logf("The " + iaasPlatform.String() + " Platform is not supported for now.")
}
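// vSphere's diskGiB is numeric, so it is patched without quotes; the other platforms patch a quoted string value.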
if iaasPlatform == clusterinfra.VSphere {
// Construct JSON payload with the appropriate type handling for fieldValue
jsonPayload := fmt.Sprintf(`{"spec":{"template":{"machines_v1beta1_machine_openshift_io":{"spec":{"providerSpec":{"value":{"%s":%v}}}}}}}`, fieldName, fieldValue)
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", jsonPayload, "--type=merge", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
} else {
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"template":{"machines_v1beta1_machine_openshift_io":{"spec":{"providerSpec":{"value":{"`+fieldName+`":"`+fieldValue+`"}}}}}}}`, "--type=merge", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
activeControlPlaneMachineSet(oc)
o.Expect(checkIfCPMSIsStable(oc)).To(o.BeTrue())
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Medium-52587-Webhook validations for CPMS resource [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP, clusterinfra.Nutanix, clusterinfra.VSphere)
g.By("Update CPMS name")
cpmsName, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"metadata":{"name":"invalid"}}`, "--type=merge", "-n", machineAPINamespace).Output()
o.Expect(cpmsName).To(o.ContainSubstring("the name of the object (invalid) does not match the name on the URL (cluster)"))
g.By("Update CPMS replicas")
cpmsReplicas, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"replicas": 4}}`, "--type=merge", "-n", machineAPINamespace).Output()
o.Expect(cpmsReplicas).To(o.ContainSubstring("Unsupported value"))
g.By("Update CPMS selector")
cpmsSelector, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"selector":{"matchLabels":{"machine.openshift.io/cluster-api-cluster": null}}}}`, "--type=merge", "-n", machineAPINamespace).Output()
o.Expect(cpmsSelector).To(o.ContainSubstring("selector is immutable"))
g.By("Update CPMS labels")
cpmsLabel, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"template":{"machines_v1beta1_machine_openshift_io":{"metadata":{"labels":{"machine.openshift.io/cluster-api-cluster": null, "machine.openshift.io/cluster-api-machine-role": "invalid", "machine.openshift.io/cluster-api-machine-type": "invalid"}}}}}}`, "--type=merge", "-n", machineAPINamespace).Output()
o.Expect(cpmsLabel).To(o.ContainSubstring("label 'machine.openshift.io/cluster-api-cluster' is required"))
o.Expect(cpmsLabel).To(o.ContainSubstring("label 'machine.openshift.io/cluster-api-machine-role' is required, and must have value 'master'"))
o.Expect(cpmsLabel).To(o.ContainSubstring("label 'machine.openshift.io/cluster-api-machine-type' is required, and must have value 'master'"))
g.By("Update CPMS state")
cpmsState, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"state":"Inactive"}}`, "--type=merge", "-n", machineAPINamespace).Output()
o.Expect(cpmsState).To(o.ContainSubstring("state cannot be changed once Active"))
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-55485-Implement update logic for RollingUpdate CPMS strategy - Delete/Add a failureDomain [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP)
skipForCPMSNotStable(oc)
skipForClusterNotStable(oc)
g.By("Check failureDomains")
availabilityZones := getCPMSAvailabilityZones(oc, iaasPlatform)
if len(availabilityZones) <= 1 {
g.Skip("Skip for the failureDomains is no more than 1")
}
g.By("Pick the failureDomain which has only one master machine")
availabilityZones = getCPMSAvailabilityZones(oc, iaasPlatform)
key, value, machineName := getZoneAndMachineFromCPMSZones(oc, availabilityZones)
suffix := getMachineSuffix(oc, machineName)
getMachineAvailabilityZoneJSON := getAvailabilityZoneJSONByCloud[iaasPlatform][getMachineAvailabilityZoneJSONCon]
getCPMSAvailabilityZonesJSON := getAvailabilityZoneJSONByCloud[iaasPlatform][getCPMSAvailabilityZonesJSONCon]
deleteFailureDomain, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.spec.template.machines_v1beta1_machine_openshift_io.failureDomains."+iaasPlatform.String()+"["+strconv.Itoa(key)+"]}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Delete the failureDomain to trigger RollingUpdate")
labelsBefore := "machine.openshift.io/zone=" + value + ",machine.openshift.io/cluster-api-machine-type=master"
labelsAfter := "machine.openshift.io/zone!=" + value + ",machine.openshift.io/cluster-api-machine-type=master"
defer printNodeInfo(oc)
defer waitMasterNodeReady(oc)
defer waitForClusterStable(oc)
defer func() {
availabilityZonesStr, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", getCPMSAvailabilityZonesJSON, "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if !strings.Contains(availabilityZonesStr, value) {
oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `[{"op":"add","path":"/spec/template/machines_v1beta1_machine_openshift_io/failureDomains/`+iaasPlatform.String()+`/0","value":`+deleteFailureDomain+`}]`, "--type=json", "-n", machineAPINamespace).Execute()
waitForCPMSUpdateCompleted(oc, 1)
}
}()
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `[{"op":"remove","path":"/spec/template/machines_v1beta1_machine_openshift_io/failureDomains/`+iaasPlatform.String()+`/`+strconv.Itoa(key)+`"}]`, "--type=json", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
newMachineNameRolledWithFailureDomain := clusterinfra.WaitForMachineRunningBySuffix(oc, suffix, labelsAfter)
clusterinfra.WaitForMachineDisappearBySuffix(oc, suffix, labelsBefore)
waitForClusterStable(oc)
g.By("Check if it will rebalance the machines")
availabilityZones = getCPMSAvailabilityZones(oc, iaasPlatform)
if len(availabilityZones) >= 3 {
e2e.Logf("availabilityZones>=3 means the three master machines are in different zones now, it will not rebalance when adding new zone")
oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `[{"op":"add","path":"/spec/template/machines_v1beta1_machine_openshift_io/failureDomains/`+iaasPlatform.String()+`/0","value":`+deleteFailureDomain+`}]`, "--type=json", "-n", machineAPINamespace).Execute()
o.Expect(checkIfCPMSCoIsStable(oc)).To(o.BeTrue())
} else {
g.By("Add the failureDomain back to check RollingUpdate strategy rebalance the machines")
availabilityZone, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, newMachineNameRolledWithFailureDomain, "-n", "openshift-machine-api", getMachineAvailabilityZoneJSON).Output()
o.Expect(err).NotTo(o.HaveOccurred())
labelsAfter = "machine.openshift.io/zone=" + availabilityZone + ",machine.openshift.io/cluster-api-machine-type=master"
oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `[{"op":"add","path":"/spec/template/machines_v1beta1_machine_openshift_io/failureDomains/`+iaasPlatform.String()+`/0","value":`+deleteFailureDomain+`}]`, "--type=json", "-n", machineAPINamespace).Execute()
newMachineNameRolledBalancedFailureDomain := clusterinfra.WaitForMachinesRunningByLabel(oc, 1, labelsBefore)[0]
e2e.Logf("updatedMachineName:%s", newMachineNameRolledBalancedFailureDomain)
suffix = getMachineSuffix(oc, newMachineNameRolledBalancedFailureDomain)
clusterinfra.WaitForMachineDisappearBySuffix(oc, suffix, labelsAfter)
waitForClusterStable(oc)
}
o.Expect(checkIfCPMSIsStable(oc)).To(o.BeTrue())
})
// author: [email protected]
g.It("Author:zhsun-NonHyperShiftHOST-Medium-70442-A warning should be shown when removing the target pools from cpms [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.GCP)
publicZone, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("dns", "cluster", "-n", "openshift-dns", "-o=jsonpath={.spec.publicZone}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if publicZone == "" {
g.Skip("Because on private clusters we don't use target pools so skip this case for private clusters!!")
}
targetPool := "null"
g.By("Add targetpool")
defer printNodeInfo(oc)
defer waitMasterNodeReady(oc)
defer waitForClusterStable(oc)
patchWithTargetPool, err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `[{"op":"add","path":"/spec/template/machines_v1beta1_machine_openshift_io/spec/providerSpec/value/targetPools","value":`+targetPool+`}]`, "--type=json", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Remove targetpool")
patchWithoutTargetPool, err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `[{"op":"remove","path":"/spec/template/machines_v1beta1_machine_openshift_io/spec/providerSpec/value/targetPools"}]`, "--type=json", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(patchWithTargetPool).To(o.ContainSubstring("Warning: spec.template.machines_v1beta1_machine_openshift_io.spec.providerSpec.value.targetPools: TargetPools field is not set on ControlPlaneMachineSet"))
o.Expect(patchWithoutTargetPool).To(o.ContainSubstring("Warning: spec.template.machines_v1beta1_machine_openshift_io.spec.providerSpec.value.targetPools: TargetPools field is not set on ControlPlaneMachineSet"))
})
// author: [email protected]
g.It("Author:huliu-NonHyperShiftHOST-Medium-78773-[CPMS] Webhook validation for custom name formats to Control Plane Machines via CPMS [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP, clusterinfra.Nutanix, clusterinfra.VSphere)
if !exutil.IsTechPreviewNoUpgrade(oc) {
g.Skip("featureSet: TechPreviewNoUpgrade is required for this test")
}
defer printNodeInfo(oc)
defer waitMasterNodeReady(oc)
defer waitForClusterStable(oc)
g.By("Patch invalid machine name prefix")
out, _ := oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"machineNamePrefix":"abcd_0"}}`, "--type=merge", "-n", machineAPINamespace).Output()
o.Expect(out).To(o.ContainSubstring(`Invalid value: "string": a lowercase RFC 1123 subdomain must consist of lowercase alphanumeric characters, hyphens ('-'), and periods ('.'). Each block, separated by periods, must start and end with an alphanumeric character. Hyphens are not allowed at the start or end of a block, and consecutive periods are not permitted.`))
})
})
| package clusterinfrastructure | ||||
test case | openshift/openshift-tests-private | f6867572-f168-41ad-ab54-66d2137f4e7e | Author:zhsun-NonHyperShiftHOST-High-56086-Controlplanemachineset should be created by default | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/control_plane_machineset.go | g.It("Author:zhsun-NonHyperShiftHOST-High-56086-Controlplanemachineset should be created by default", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.GCP, clusterinfra.Azure, clusterinfra.Nutanix, clusterinfra.VSphere)
g.By("CPMS should be created by default and state is Active")
cpmsState, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-n", machineAPINamespace, "-o=jsonpath={.spec.state}").Output()
o.Expect(cpmsState).To(o.ContainSubstring("Active"))
o.Expect(checkIfCPMSIsStable(oc)).To(o.BeTrue())
}) | |||||
test case | openshift/openshift-tests-private | 4cc788e9-bcc7-4aa7-a560-75caf3ed6a27 | Author:zhsun-NonHyperShiftHOST-Medium-53320-Owner reference could be added/removed to control plane machines [Disruptive] | ['"time"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/control_plane_machineset.go | g.It("Author:zhsun-NonHyperShiftHOST-Medium-53320-Owner reference could be added/removed to control plane machines [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP, clusterinfra.Nutanix, clusterinfra.VSphere)
g.By("Check ownerReferences is added to master machines")
masterMachineList := clusterinfra.ListMasterMachineNames(oc)
for _, masterMachineName := range masterMachineList {
ownerReferences, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, masterMachineName, "-o=jsonpath={.metadata.ownerReferences}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(ownerReferences).ShouldNot(o.BeEmpty())
}
g.By("Delete controlplanemachineset")
defer printNodeInfo(oc)
defer activeControlPlaneMachineSet(oc)
deleteControlPlaneMachineSet(oc)
g.By("Check ownerReferences is removed from master machines")
err := wait.Poll(2*time.Second, 30*time.Second, func() (bool, error) {
cpmsState, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-n", machineAPINamespace, "-o=jsonpath={.spec.state}").Output()
if cpmsState == "Inactive" {
for _, masterMachineName := range masterMachineList {
ownerReferences, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, masterMachineName, "-o=jsonpath={.metadata.ownerReferences}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(ownerReferences).Should(o.BeEmpty())
}
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "controlplanemachineset is not re-created as Inactive or ownerReferences are not removed")
}) | |||||
test case | openshift/openshift-tests-private | 02e420dc-05f7-4e46-a673-9ffa4328f14f | Author:zhsun-NonHyperShiftHOST-Medium-53081-Finalizer should be added to control plane machineset [Disruptive] | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/control_plane_machineset.go | g.It("Author:zhsun-NonHyperShiftHOST-Medium-53081-Finalizer should be added to control plane machineset [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP, clusterinfra.Nutanix, clusterinfra.VSphere)
g.By("Check finalizer is added to controlplanemachineset")
finalizers, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.metadata.finalizers[0]}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(finalizers).To(o.ContainSubstring("controlplanemachineset.machine.openshift.io"))
g.By("Remove finalizer")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"metadata":{"finalizers":null}}`, "--type=merge", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Finalizer should be re-added to controlplanemachineset")
finalizers, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.metadata.finalizers[0]}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(finalizers).To(o.ContainSubstring("controlplanemachineset.machine.openshift.io"))
}) | |||||
test case | openshift/openshift-tests-private | 29afb879-8535-435c-9f16-f4a2e0e5f161 | Author:zhsun-NonHyperShiftHOST-High-53610-Operator control-plane-machine-set should be in Available state and report version information | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/control_plane_machineset.go | g.It("Author:zhsun-NonHyperShiftHOST-High-53610-Operator control-plane-machine-set should be in Available state and report version information", func() {
capability, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion", "version", "-o=jsonpath={.status.capabilities.enabledCapabilities}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if !strings.Contains(capability, "MachineAPI") {
g.Skip("MachineAPI not enabled so co control-plane-machine-set wont be present")
}
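//The jsonpath concatenates the Available, Progressing and Degraded condition statuses into a
//single string, so a healthy operator yields "TrueFalseFalse".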
state, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperator/control-plane-machine-set", "-o=jsonpath={.status.conditions[?(@.type==\"Available\")].status}{.status.conditions[?(@.type==\"Progressing\")].status}{.status.conditions[?(@.type==\"Degraded\")].status}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
version, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusteroperator/control-plane-machine-set", "-o=jsonpath={.status.versions[0].version}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(state).To(o.ContainSubstring("TrueFalseFalse"))
o.Expect(version).To(o.ContainSubstring("4."))
}) | |||||
test case | openshift/openshift-tests-private | dc4bbd06-7338-4313-a83f-175829d70f0f | Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-53323-78772-Implement update logic for RollingUpdate CPMS strategy update instance type [Disruptive] | ['"time"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/control_plane_machineset.go | g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-53323-78772-Implement update logic for RollingUpdate CPMS strategy update instance type [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP)
skipForCPMSNotStable(oc)
skipForClusterNotStable(oc)
controlPlaneArch := architecture.GetControlPlaneArch(oc)
changeInstanceType := changeToBackupInstanceType[iaasPlatform][controlPlaneArch][changeInstanceTypeCon]
backupInstanceType := changeToBackupInstanceType[iaasPlatform][controlPlaneArch][backupInstanceTypeCon]
if iaasPlatform == clusterinfra.GCP && controlPlaneArch == architecture.AMD64 {
confidentialCompute, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.spec.template.machines_v1beta1_machine_openshift_io.spec.providerSpec.value.confidentialCompute}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if confidentialCompute == "Enabled" {
changeInstanceType = "c2d-standard-4"
backupInstanceType = "n2d-standard-4"
}
}
g.By("Get current instanceType")
currentInstanceType, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", getInstanceTypeJsonByCloud[iaasPlatform][getInstanceTypeJSONCon], "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("currentInstanceType:%s", currentInstanceType)
if currentInstanceType == changeInstanceType {
changeInstanceType = backupInstanceType
}
labelsAfter := "machine.openshift.io/instance-type=" + changeInstanceType + ",machine.openshift.io/cluster-api-machine-type=master"
labelsBefore := "machine.openshift.io/instance-type=" + currentInstanceType + ",machine.openshift.io/cluster-api-machine-type=master"
g.By("Check if any other fields need to be updated")
otherUpdateFields := otherUpdateFieldsByCloud[iaasPlatform][updateFieldsCon]
otherRecoverFields := otherUpdateFieldsByCloud[iaasPlatform][recoverFieldsCon]
if iaasPlatform == clusterinfra.AWS {
clusterinfra.GetAwsCredentialFromCluster(oc)
awsClient := exutil.InitAwsSession()
_, err := awsClient.GetPlacementGroupByName("pgpartition3")
if err != nil {
otherUpdateFields = ``
otherRecoverFields = ``
}
}
patchstrChange := getInstanceTypeJsonByCloud[iaasPlatform][patchInstanceTypePrefixCon] + `"` + changeInstanceType + `"` + otherUpdateFields + getInstanceTypeJsonByCloud[iaasPlatform][patchInstanceTypeSuffixCon]
patchstrRecover := getInstanceTypeJsonByCloud[iaasPlatform][patchInstanceTypePrefixCon] + `"` + currentInstanceType + `"` + otherRecoverFields + getInstanceTypeJsonByCloud[iaasPlatform][patchInstanceTypeSuffixCon]
g.By("Change instanceType to trigger RollingUpdate")
defer printNodeInfo(oc)
defer waitMasterNodeReady(oc)
defer waitForClusterStable(oc)
defer waitForCPMSUpdateCompleted(oc, 1)
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", patchstrRecover, "--type=merge", "-n", machineAPINamespace).Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"machineNamePrefix":null}}`, "--type=merge", "-n", machineAPINamespace).Execute()
customMachineName := customMasterMachineNamePrefix
if iaasPlatform == clusterinfra.GCP {
customMachineName = customMasterMachineNamePrefixGCP
}
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"machineNamePrefix":"`+customMachineName+`"}}`, "--type=merge", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", patchstrChange, "--type=merge", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
/*
The RollingUpdate strategy updates all the master machines one by one.
Here we only check that the first machine is updated successfully and then consider the case passed,
to save time: the update is identical for every machine, so it is reasonable to assume the
remaining machines behave the same.
*/
updatedMachineName := clusterinfra.WaitForMachinesRunningByLabel(oc, 1, labelsAfter)[0]
e2e.Logf("updatedMachineName:%s", updatedMachineName)
if exutil.IsTechPreviewNoUpgrade(oc) {
o.Expect(updatedMachineName).To(o.HavePrefix(customMachineName))
}
suffix := getMachineSuffix(oc, updatedMachineName)
e2e.Logf("suffix:%s", suffix)
clusterinfra.WaitForMachineDisappearBySuffix(oc, suffix, labelsBefore)
}) | |||||
test case | openshift/openshift-tests-private | 1b3f087a-4db9-43e9-8f95-76adf4c612a8 | Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-53323-78772-Implement update logic for RollingUpdate CPMS strategy update some field [Disruptive] | ['clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/control_plane_machineset.go | g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-53323-78772-Implement update logic for RollingUpdate CPMS strategy update some field [Disruptive]", func() {
//For providers that do not expose an instance type, we update another provider-specific field to trigger the update
//For nutanix, we choose vcpusPerSocket
//For vsphere, we choose diskGiB
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Nutanix, clusterinfra.VSphere)
skipForCPMSNotStable(oc)
skipForClusterNotStable(oc)
var changeFieldValue, backupFieldValue, getFieldValueJSON string
var patchstrPrefix, patchstrSuffix string
changeFieldValue = changeToBackupSpecificField[iaasPlatform][changeSpecificFieldCon]
backupFieldValue = changeToBackupSpecificField[iaasPlatform][backupSpecificFieldCon]
getFieldValueJSON = getSpecificFieldJsonByCloud[iaasPlatform][getSpecificFieldJSONCon]
patchstrPrefix = getSpecificFieldJsonByCloud[iaasPlatform][patchSpecificFieldPrefixCon]
patchstrSuffix = getSpecificFieldJsonByCloud[iaasPlatform][patchSpecificFieldSuffixCon]
g.By("Get current field value")
currentFieldValue, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", getFieldValueJSON, "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("currentFieldValue:%s", currentFieldValue)
if currentFieldValue == changeFieldValue {
changeFieldValue = backupFieldValue
}
getMachineFieldValueJSON := getSpecificFieldJsonByCloud[iaasPlatform][getMachineFieldValueJSONCon]
patchstrChange := patchstrPrefix + changeFieldValue + patchstrSuffix
patchstrRecover := patchstrPrefix + currentFieldValue + patchstrSuffix
g.By("Change field value to trigger RollingUpdate")
defer printNodeInfo(oc)
defer waitMasterNodeReady(oc)
defer waitForClusterStable(oc)
defer waitForCPMSUpdateCompleted(oc, 1)
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", patchstrRecover, "--type=merge", "-n", machineAPINamespace).Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"machineNamePrefix":null}}`, "--type=merge", "-n", machineAPINamespace).Execute()
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"machineNamePrefix":"`+customMasterMachineNamePrefix+`"}}`, "--type=merge", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", patchstrChange, "--type=merge", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
labelMaster := "machine.openshift.io/cluster-api-machine-type=master"
updatedMachineName := clusterinfra.WaitForMachineRunningByField(oc, getMachineFieldValueJSON, changeFieldValue, labelMaster)
e2e.Logf("updatedMachineName:%s", updatedMachineName)
if exutil.IsTechPreviewNoUpgrade(oc) {
o.Expect(updatedMachineName).To(o.HavePrefix(customMasterMachineNamePrefix))
}
suffix := getMachineSuffix(oc, updatedMachineName)
e2e.Logf("suffix:%s", suffix)
clusterinfra.WaitForMachineDisappearBySuffixAndField(oc, suffix, getMachineFieldValueJSON, currentFieldValue, labelMaster)
}) | |||||
test case | openshift/openshift-tests-private | 15aabd27-f370-46b0-8e34-ecf0ee902565 | Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-55631-Implement update logic for RollingUpdate CPMS strategy - Delete a master machine [Disruptive] | ['"math/rand"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/control_plane_machineset.go | g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-55631-Implement update logic for RollingUpdate CPMS strategy - Delete a master machine [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP, clusterinfra.Nutanix, clusterinfra.VSphere)
skipForCPMSNotStable(oc)
skipForClusterNotStable(oc)
g.By("Random pick a master machine")
machineName := clusterinfra.ListMasterMachineNames(oc)[rand.Int31n(int32(len(clusterinfra.ListMasterMachineNames(oc))))]
suffix := getMachineSuffix(oc, machineName)
var getMachineAvailabilityZoneJSON string
labels := "machine.openshift.io/cluster-api-machine-type=master"
if iaasPlatform == clusterinfra.AWS || iaasPlatform == clusterinfra.Azure || iaasPlatform == clusterinfra.GCP {
getMachineAvailabilityZoneJSON = getAvailabilityZoneJSONByCloud[iaasPlatform][getMachineAvailabilityZoneJSONCon]
availabilityZone, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(mapiMachine, machineName, "-n", "openshift-machine-api", getMachineAvailabilityZoneJSON).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if availabilityZone != "" {
labels = "machine.openshift.io/zone=" + availabilityZone + ",machine.openshift.io/cluster-api-machine-type=master"
}
}
g.By("Delete the master machine to trigger RollingUpdate")
defer printNodeInfo(oc)
defer waitMasterNodeReady(oc)
defer waitForClusterStable(oc)
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args(mapiMachine, machineName, "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterinfra.WaitForMachineRunningBySuffix(oc, suffix, labels)
clusterinfra.WaitForMachineDisappearByName(oc, machineName)
}) | |||||
test case | openshift/openshift-tests-private | 44868e6e-a03f-4d85-aabe-681d7592fc5d | Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-54005-78772-Control plane machine set OnDelete update strategies - update instance type [Disruptive] | ['"math/rand"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/control_plane_machineset.go | g.It("Author:zhsun-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-54005-78772-Control plane machine set OnDelete update strategies - update instance type [Disruptive]", func() {
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.AWS, clusterinfra.Azure, clusterinfra.GCP)
skipForCPMSNotStable(oc)
skipForClusterNotStable(oc)
controlPlaneArch := architecture.GetControlPlaneArch(oc)
changeInstanceType := changeToBackupInstanceType[iaasPlatform][controlPlaneArch][changeInstanceTypeCon]
backupInstanceType := changeToBackupInstanceType[iaasPlatform][controlPlaneArch][backupInstanceTypeCon]
if iaasPlatform == clusterinfra.GCP && controlPlaneArch == architecture.AMD64 {
confidentialCompute, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", "-o=jsonpath={.spec.template.machines_v1beta1_machine_openshift_io.spec.providerSpec.value.confidentialCompute}", "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if confidentialCompute == "Enabled" {
changeInstanceType = "c2d-standard-4"
backupInstanceType = "n2d-standard-4"
}
}
g.By("Get current instanceType")
currentInstanceType, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", getInstanceTypeJsonByCloud[iaasPlatform][getInstanceTypeJSONCon], "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("currentInstanceType:%s", currentInstanceType)
if currentInstanceType == changeInstanceType {
changeInstanceType = backupInstanceType
}
labelsAfter := "machine.openshift.io/instance-type=" + changeInstanceType + ",machine.openshift.io/cluster-api-machine-type=master"
patchstrChange := getInstanceTypeJsonByCloud[iaasPlatform][patchInstanceTypePrefixCon] + `"` + changeInstanceType + `"` + getInstanceTypeJsonByCloud[iaasPlatform][patchInstanceTypeSuffixCon]
patchstrRecover := getInstanceTypeJsonByCloud[iaasPlatform][patchInstanceTypePrefixCon] + `"` + currentInstanceType + `"` + getInstanceTypeJsonByCloud[iaasPlatform][patchInstanceTypeSuffixCon]
g.By("Update strategy to OnDelete, change instanceType to trigger OnDelete update")
defer printNodeInfo(oc)
defer waitMasterNodeReady(oc)
defer waitForClusterStable(oc)
defer waitForCPMSUpdateCompleted(oc, 1)
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"strategy":{"type":"RollingUpdate"}}}`, "--type=merge", "-n", machineAPINamespace).Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"machineNamePrefix":null}}`, "--type=merge", "-n", machineAPINamespace).Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", patchstrRecover, "--type=merge", "-n", machineAPINamespace).Execute()
defer waitForClusterStable(oc)
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"strategy":{"type":"OnDelete"}}}`, "--type=merge", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
customMachineName := customMasterMachineNamePrefix
if iaasPlatform == clusterinfra.GCP {
customMachineName = customMasterMachineNamePrefixGCP
}
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"machineNamePrefix":"`+customMachineName+`"}}`, "--type=merge", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", patchstrChange, "--type=merge", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
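//With the OnDelete strategy, the updated spec is not rolled out automatically; the CPMS replaces a
//machine only after it is deleted.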
g.By("Delete one master manually")
toDeletedMachineName := clusterinfra.ListMasterMachineNames(oc)[rand.Int31n(int32(len(clusterinfra.ListMasterMachineNames(oc))))]
clusterinfra.DeleteMachine(oc, toDeletedMachineName)
g.By("Check new master will be created and old master will be deleted")
newCreatedMachineName := clusterinfra.WaitForMachinesRunningByLabel(oc, 1, labelsAfter)[0]
e2e.Logf("newCreatedMachineName:%s", newCreatedMachineName)
if exutil.IsTechPreviewNoUpgrade(oc) {
o.Expect(newCreatedMachineName).To(o.HavePrefix(customMachineName))
}
clusterinfra.WaitForMachineDisappearByName(oc, toDeletedMachineName)
waitForClusterStable(oc)
o.Expect(checkIfCPMSIsStable(oc)).To(o.BeTrue())
}) | |||||
test case | openshift/openshift-tests-private | 25d82a04-1608-485b-8218-b8bd90f747ce | Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Author:huliu-Medium-54005-78772-Control plane machine set OnDelete update strategies - update some field [Disruptive] | ['"math/rand"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | github.com/openshift/openshift-tests-private/test/extended/clusterinfrastructure/control_plane_machineset.go | g.It("Author:huliu-NonHyperShiftHOST-Longduration-NonPreRelease-Author:huliu-Medium-54005-78772-Control plane machine set OnDelete update strategies - update some field [Disruptive]", func() {
//For providers that do not expose an instance type, we update another provider-specific field to trigger the update
//For nutanix, we choose vcpusPerSocket
//For vsphere, we choose diskGiB
clusterinfra.SkipConditionally(oc)
clusterinfra.SkipTestIfSupportedPlatformNotMatched(oc, clusterinfra.Nutanix, clusterinfra.VSphere)
skipForCPMSNotStable(oc)
skipForClusterNotStable(oc)
var changeFieldValue, backupFieldValue, getFieldValueJSON string
var patchstrPrefix, patchstrSuffix string
changeFieldValue = changeToBackupSpecificField[iaasPlatform][changeSpecificFieldCon]
backupFieldValue = changeToBackupSpecificField[iaasPlatform][backupSpecificFieldCon]
getFieldValueJSON = getSpecificFieldJsonByCloud[iaasPlatform][getSpecificFieldJSONCon]
patchstrPrefix = getSpecificFieldJsonByCloud[iaasPlatform][patchSpecificFieldPrefixCon]
patchstrSuffix = getSpecificFieldJsonByCloud[iaasPlatform][patchSpecificFieldSuffixCon]
g.By("Get current field value")
currentFieldValue, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("controlplanemachineset/cluster", getFieldValueJSON, "-n", machineAPINamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("currentFieldValue:%s", currentFieldValue)
if currentFieldValue == changeFieldValue {
changeFieldValue = backupFieldValue
}
getMachineFieldValueJSON := getSpecificFieldJsonByCloud[iaasPlatform][getMachineFieldValueJSONCon]
patchstrChange := patchstrPrefix + changeFieldValue + patchstrSuffix
patchstrRecover := patchstrPrefix + currentFieldValue + patchstrSuffix
g.By("Update strategy to OnDelete, change field value to trigger OnDelete update")
defer printNodeInfo(oc)
defer waitMasterNodeReady(oc)
defer waitForClusterStable(oc)
defer waitForCPMSUpdateCompleted(oc, 1)
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"strategy":{"type":"RollingUpdate"}}}`, "--type=merge", "-n", machineAPINamespace).Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"machineNamePrefix":null}}`, "--type=merge", "-n", machineAPINamespace).Execute()
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", patchstrRecover, "--type=merge", "-n", machineAPINamespace).Execute()
defer waitForClusterStable(oc)
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"strategy":{"type":"OnDelete"}}}`, "--type=merge", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", `{"spec":{"machineNamePrefix":"`+customMasterMachineNamePrefix+`"}}`, "--type=merge", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("controlplanemachineset/cluster", "-p", patchstrChange, "--type=merge", "-n", machineAPINamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
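//With the OnDelete strategy, the updated spec is not rolled out automatically; the CPMS replaces a
//machine only after it is deleted.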
g.By("Delete one master manually")
toDeletedMachineName := clusterinfra.ListMasterMachineNames(oc)[rand.Int31n(int32(len(clusterinfra.ListMasterMachineNames(oc))))]
clusterinfra.DeleteMachine(oc, toDeletedMachineName)
g.By("Check new master will be created and old master will be deleted")
labelMaster := "machine.openshift.io/cluster-api-machine-type=master"
newCreatedMachineName := clusterinfra.WaitForMachineRunningByField(oc, getMachineFieldValueJSON, changeFieldValue, labelMaster)
e2e.Logf("newCreatedMachineName:%s", newCreatedMachineName)
if exutil.IsTechPreviewNoUpgrade(oc) {
o.Expect(newCreatedMachineName).To(o.HavePrefix(customMasterMachineNamePrefix))
}
clusterinfra.WaitForMachineDisappearByName(oc, toDeletedMachineName)
waitForClusterStable(oc)
o.Expect(checkIfCPMSIsStable(oc)).To(o.BeTrue())
}) |