element_type | project_name | uuid | name | imports | structs | interfaces | file_location | code | global_vars | package | tags
---|---|---|---|---|---|---|---|---|---|---|---
test case
|
openshift/openshift-tests-private
|
fbb177d8-8591-45fa-8df5-c42375b01223
|
Author:sregidor-NonPreRelease-Longduration-High-43151-[OnCLayer] add node label to service monitor [Serial]
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonPreRelease-Longduration-High-43151-[OnCLayer] add node label to service monitor [Serial]", func() {
exutil.By("Get current mcd_ metrics from machine-config-daemon service")
svcMCD := NewNamespacedResource(oc.AsAdmin(), "service", MachineConfigNamespace, MachineConfigDaemon)
clusterIP, ipErr := WrapWithBracketsIfIpv6(svcMCD.GetOrFail("{.spec.clusterIP}"))
o.Expect(ipErr).ShouldNot(o.HaveOccurred(), "No valid IP")
port := svcMCD.GetOrFail("{.spec.ports[?(@.name==\"metrics\")].port}")
token := getSATokenFromContainer(oc, "prometheus-k8s-0", "openshift-monitoring", "prometheus")
statsCmd := fmt.Sprintf("curl -s -k -H 'Authorization: Bearer %s' https://%s:%s/metrics | grep 'mcd_' | grep -v '#'", token, clusterIP, port)
logger.Infof("stats output:\n %s", statsCmd)
statsOut, err := exutil.RemoteShPod(oc, "openshift-monitoring", "prometheus-k8s-0", "sh", "-c", statsCmd)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(statsOut).Should(o.ContainSubstring("mcd_host_os_and_version"))
o.Expect(statsOut).Should(o.ContainSubstring("mcd_kubelet_state"))
o.Expect(statsOut).Should(o.ContainSubstring("mcd_pivot_errors_total"))
o.Expect(statsOut).Should(o.ContainSubstring("mcd_reboots_failed_total"))
o.Expect(statsOut).Should(o.ContainSubstring("mcd_state"))
o.Expect(statsOut).Should(o.ContainSubstring("mcd_update_state"))
o.Expect(statsOut).Should(o.ContainSubstring("mcd_update_state"))
exutil.By("Check relabeling section in machine-config-daemon")
sourceLabels, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("servicemonitor/machine-config-daemon", "-n", MachineConfigNamespace,
"-o", "jsonpath='{.spec.endpoints[*].relabelings[*].sourceLabels}'").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(sourceLabels).Should(o.ContainSubstring("__meta_kubernetes_pod_node_name"))
exutil.By("Check node label in mcd_state metrics")
stateQuery := getPrometheusQueryResults(oc, "mcd_state")
logger.Infof("metrics:\n %s", stateQuery)
firstMasterNode := NewNodeList(oc).GetAllMasterNodesOrFail()[0]
firstWorkerNode := NewNodeList(oc).GetAllLinuxWorkerNodesOrFail()[0]
o.Expect(stateQuery).Should(o.ContainSubstring(`"node":"` + firstMasterNode.name + `"`))
o.Expect(stateQuery).Should(o.ContainSubstring(`"node":"` + firstWorkerNode.name + `"`))
})
| |||||
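The metrics check above builds a curl URL from the service clusterIP and brackets it when it is an IPv6 literal (WrapWithBracketsIfIpv6). A minimal, self-contained sketch of such a helper, offered as an assumption rather than the repository's implementation:

// Hypothetical sketch (not the repository's actual WrapWithBracketsIfIpv6 helper):
// bracket IPv6 literals so they can be used as the host part of a URL,
// e.g. https://[fd00::1]:9001/metrics
package main

import (
	"fmt"
	"net"
	"strings"
)

func wrapIfIPv6(host string) string {
	ip := net.ParseIP(host)
	// An IPv6 literal contains colons and must be wrapped in brackets inside a URL.
	if ip != nil && strings.Contains(host, ":") && !strings.HasPrefix(host, "[") {
		return "[" + host + "]"
	}
	return host
}

func main() {
	fmt.Println(wrapIfIPv6("10.0.0.1")) // 10.0.0.1
	fmt.Println(wrapIfIPv6("fd00::1"))  // [fd00::1]
}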
test case
|
openshift/openshift-tests-private
|
06f8d629-d00c-4abe-87c6-0986dcf18aab
|
Author:sregidor-NonPreRelease-Longduration-High-43726-[P1][OnCLayer] azure Controller-Config Infrastructure does not match cluster Infrastructure resource [Serial]
|
['"encoding/json"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonPreRelease-Longduration-High-43726-[P1][OnCLayer] azure Controller-Config Infrastructure does not match cluster Infrastructure resource [Serial]", func() {
exutil.By("Get machine-config-controller platform status.")
mccPlatformStatus := NewResource(oc.AsAdmin(), "controllerconfig", "machine-config-controller").GetOrFail("{.spec.infra.status.platformStatus}")
logger.Infof("test mccPlatformStatus:\n %s", mccPlatformStatus)
if exutil.CheckPlatform(oc) == AzurePlatform {
exutil.By("check cloudName field.")
var jsonMccPlatformStatus map[string]interface{}
errparseinfra := json.Unmarshal([]byte(mccPlatformStatus), &jsonMccPlatformStatus)
o.Expect(errparseinfra).NotTo(o.HaveOccurred())
o.Expect(jsonMccPlatformStatus).Should(o.HaveKey("azure"))
azure := jsonMccPlatformStatus["azure"].(map[string]interface{})
o.Expect(azure).Should(o.HaveKey("cloudName"))
}
exutil.By("Get infrastructure platform status.")
infraPlatformStatus := NewResource(oc.AsAdmin(), "infrastructures", "cluster").GetOrFail("{.status.platformStatus}")
logger.Infof("infraPlatformStatus:\n %s", infraPlatformStatus)
exutil.By("Check same status in infra and machine-config-controller.")
o.Expect(mccPlatformStatus).To(o.Equal(infraPlatformStatus))
})
| |||||
test case
|
openshift/openshift-tests-private
|
56bfc935-d533-4fa9-9570-cb6e4781dfd1
|
Author:mhanss-NonPreRelease-Longduration-High-42680-[P2] change pull secret in the openshift-config namespace [Serial]
|
['"encoding/json"', '"os/exec"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:mhanss-NonPreRelease-Longduration-High-42680-[P2] change pull secret in the openshift-config namespace [Serial]", func() {
exutil.By("Add a dummy credential in pull secret")
secretFile, err := getPullSecret(oc)
o.Expect(err).NotTo(o.HaveOccurred())
newSecretFile := generateTmpFile(oc, "pull-secret.dockerconfigjson")
_, copyErr := exec.Command("bash", "-c", "cp "+secretFile+" "+newSecretFile).Output()
o.Expect(copyErr).NotTo(o.HaveOccurred())
newPullSecret, err := oc.AsAdmin().WithoutNamespace().Run("registry").Args("login", `--registry="quay.io"`, `--auth-basic="mhans-redhat:redhat123"`, "--to="+newSecretFile, "--skip-check").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(newPullSecret).Should(o.ContainSubstring(`Saved credentials for "quay.io"`))
setData, err := setDataForPullSecret(oc, newSecretFile)
defer func() {
_, err := setDataForPullSecret(oc, secretFile)
o.Expect(err).NotTo(o.HaveOccurred())
}()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(setData).Should(o.Equal("secret/pull-secret data updated"))
exutil.By("Wait for configuration to be applied in master and worker pools")
mcpWorker := NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
mcpMaster := NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)
mcpWorker.waitForComplete()
mcpMaster.waitForComplete()
exutil.By("Check new generated rendered configs for newly added pull secret")
renderedConfs, renderedErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("mc", "--sort-by=metadata.creationTimestamp", "-o", "jsonpath='{.items[-2:].metadata.name}'").Output()
o.Expect(renderedErr).NotTo(o.HaveOccurred())
o.Expect(renderedConfs).NotTo(o.BeEmpty())
slices := strings.Split(strings.Trim(renderedConfs, "'"), " ")
var renderedMasterConf, renderedWorkerConf string
for _, conf := range slices {
if strings.Contains(conf, MachineConfigPoolMaster) {
renderedMasterConf = conf
} else if strings.Contains(conf, MachineConfigPoolWorker) {
renderedWorkerConf = conf
}
}
logger.Infof("New rendered config generated for master: %s", renderedMasterConf)
logger.Infof("New rendered config generated for worker: %s", renderedWorkerConf)
exutil.By("Check logs of machine-config-daemon on master-n-worker nodes, make sure pull secret changes are detected, drain and reboot are skipped")
masterNode := NewNodeList(oc).GetAllMasterNodesOrFail()[0]
workerNode := NewNodeList(oc).GetAllLinuxWorkerNodesOrFail()[0]
commonExpectedStrings := []string{`Writing file "/var/lib/kubelet/config.json"`, "Changes do not require drain, skipping"}
expectedStringsForMaster := append(commonExpectedStrings, "Node has Desired Config "+renderedMasterConf+", skipping reboot")
expectedStringsForWorker := append(commonExpectedStrings, "Node has Desired Config "+renderedWorkerConf+", skipping reboot")
masterMcdLogs, masterMcdLogErr := exutil.GetSpecificPodLogs(oc, MachineConfigNamespace, MachineConfigDaemon, masterNode.GetMachineConfigDaemon(), "")
o.Expect(masterMcdLogErr).NotTo(o.HaveOccurred())
workerMcdLogs, workerMcdLogErr := exutil.GetSpecificPodLogs(oc, MachineConfigNamespace, MachineConfigDaemon, workerNode.GetMachineConfigDaemon(), "")
o.Expect(workerMcdLogErr).NotTo(o.HaveOccurred())
foundOnMaster := containsMultipleStrings(masterMcdLogs, expectedStringsForMaster)
o.Expect(foundOnMaster).Should(o.BeTrue())
logger.Infof("MCD log on master node %s contains expected strings: %v", masterNode.name, expectedStringsForMaster)
foundOnWorker := containsMultipleStrings(workerMcdLogs, expectedStringsForWorker)
o.Expect(foundOnWorker).Should(o.BeTrue())
logger.Infof("MCD log on worker node %s contains expected strings: %v", workerNode.name, expectedStringsForWorker)
})
| |||||
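The MCD log assertions above rely on a containsMultipleStrings helper. A plausible sketch of such a helper (an assumption, not the repository's code; it only needs the standard strings package):

// containsMultipleStrings reports whether every expected substring appears in the given log output.
func containsMultipleStrings(logs string, expected []string) bool {
	for _, s := range expected {
		if !strings.Contains(logs, s) {
			return false
		}
	}
	return true
}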
test case
|
openshift/openshift-tests-private
|
7650aae6-d3c3-4e28-a128-94ca731c53c0
|
Author:sregidor-NonPreRelease-Longduration-High-45239-[OnCLayer] KubeletConfig has a limit of 10 per cluster [Disruptive]
|
['"fmt"', '"strings"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonPreRelease-Longduration-High-45239-[OnCLayer] KubeletConfig has a limit of 10 per cluster [Disruptive]", func() {
kcsLimit := 10
exutil.By("Pause mcp worker")
mcp := NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
defer mcp.pause(false)
defer mcp.waitForComplete() // wait before unpausing, or an update will be triggered
mcp.pause(true)
exutil.By("Calculate number of existing KubeletConfigs")
kcList := NewKubeletConfigList(oc.AsAdmin())
kcs, kclErr := kcList.GetAll()
o.Expect(kclErr).ShouldNot(o.HaveOccurred(), "Error getting existing KubeletConfig resources")
existingKcs := len(kcs)
logger.Infof("%d existing KubeletConfigs. We need to create %d KubeletConfigs to reach the %d configs limit",
existingKcs, kcsLimit-existingKcs, kcsLimit)
exutil.By(fmt.Sprintf("Create %d kubelet config to reach the limit", kcsLimit-existingKcs))
createdKcs := []ResourceInterface{}
kcTemplate := generateTemplateAbsolutePath("change-maxpods-kubelet-config.yaml")
for n := existingKcs + 1; n <= kcsLimit; n++ {
kcName := fmt.Sprintf("change-maxpods-kubelet-config-%d", n)
kc := NewKubeletConfig(oc.AsAdmin(), kcName, kcTemplate)
defer kc.DeleteOrFail()
kc.create()
createdKcs = append(createdKcs, kc)
logger.Infof("Created:\n %s", kcName)
}
exutil.By("Created kubeletconfigs must be successful")
for _, kcItem := range createdKcs {
kcItem.(*KubeletConfig).waitUntilSuccess("15s")
}
exutil.By(fmt.Sprintf("Check that %d machine configs were created", kcsLimit-existingKcs))
renderedKcConfigsSuffix := "worker-generated-kubelet"
verifyRenderedMcs(oc, renderedKcConfigsSuffix, createdKcs)
exutil.By(fmt.Sprintf("Create a new Kubeletconfig. The %dth one", kcsLimit+1))
kcName := fmt.Sprintf("change-maxpods-kubelet-config-%d", kcsLimit+1)
kc := NewKubeletConfig(oc.AsAdmin(), kcName, kcTemplate)
defer kc.DeleteOrFail()
kc.create()
exutil.By(fmt.Sprintf("Created kubeletconfigs over the limit must report a failure regarding the %d configs limit", kcsLimit))
expectedMsg := fmt.Sprintf("could not get kubelet config key: max number of supported kubelet config (%d) has been reached. Please delete old kubelet configs before retrying", kcsLimit)
kc.waitUntilFailure(expectedMsg, "10s")
exutil.By("Created kubeletconfigs inside the limit must be successful")
for _, kcItem := range createdKcs {
kcItem.(*KubeletConfig).waitUntilSuccess("10s")
}
exutil.By("Check that only the right machine configs were created")
	// Check all KubeletConfigs, the ones created by this TC and the already existing ones too
allKcs := []ResourceInterface{}
allKcs = append(allKcs, createdKcs...)
for _, kcItem := range kcs {
key := kcItem
allKcs = append(allKcs, &key)
}
allMcs := verifyRenderedMcs(oc, renderedKcConfigsSuffix, allKcs)
kcCounter := 0
for _, mc := range allMcs {
if strings.HasPrefix(mc.name, "99-"+renderedKcConfigsSuffix) {
kcCounter++
}
}
	o.Expect(kcCounter).Should(o.Equal(kcsLimit), "Only %d Kubeletconfig resources should be generated", kcsLimit)
})
| |||||
test case
|
openshift/openshift-tests-private
|
6ba9d266-4482-43c2-992f-2772c2185192
|
Author:sregidor-NonPreRelease-Longduration-High-48468-[P1][OnCLayer] ContainerRuntimeConfig has a limit of 10 per cluster [Disruptive]
|
['"fmt"', '"strings"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonPreRelease-Longduration-High-48468-[P1][OnCLayer] ContainerRuntimeConfig has a limit of 10 per cluster [Disruptive]", func() {
crsLimit := 10
exutil.By("Pause mcp worker")
mcp := NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
defer mcp.pause(false)
defer mcp.waitForComplete() // wait before unpausing, or an update will be triggered
mcp.pause(true)
exutil.By("Calculate number of existing ContainerRuntimeConfigs")
crList := NewContainerRuntimeConfigList(oc.AsAdmin())
crs, crlErr := crList.GetAll()
o.Expect(crlErr).ShouldNot(o.HaveOccurred(), "Error getting existing ContainerRuntimeConfig resources")
existingCrs := len(crs)
logger.Infof("%d existing ContainerRuntimeConfig. We need to create %d ContainerRuntimeConfigs to reach the %d configs limit",
existingCrs, crsLimit-existingCrs, crsLimit)
exutil.By(fmt.Sprintf("Create %d container runtime configs to reach the limit", crsLimit-existingCrs))
createdCrs := []ResourceInterface{}
crTemplate := generateTemplateAbsolutePath("change-ctr-cr-config.yaml")
for n := existingCrs + 1; n <= crsLimit; n++ {
crName := fmt.Sprintf("change-ctr-cr-config-%d", n)
cr := NewContainerRuntimeConfig(oc.AsAdmin(), crName, crTemplate)
defer cr.DeleteOrFail()
cr.create()
createdCrs = append(createdCrs, cr)
logger.Infof("Created:\n %s", crName)
}
exutil.By("Created ContainerRuntimeConfigs must be successful")
for _, crItem := range createdCrs {
crItem.(*ContainerRuntimeConfig).waitUntilSuccess("10s")
}
exutil.By(fmt.Sprintf("Check that %d machine configs were created", crsLimit-existingCrs))
renderedCrConfigsSuffix := "worker-generated-containerruntime"
logger.Infof("Pre function res: %v", createdCrs)
verifyRenderedMcs(oc, renderedCrConfigsSuffix, createdCrs)
exutil.By(fmt.Sprintf("Create a new ContainerRuntimeConfig. The %dth one", crsLimit+1))
crName := fmt.Sprintf("change-ctr-cr-config-%d", crsLimit+1)
cr := NewContainerRuntimeConfig(oc.AsAdmin(), crName, crTemplate)
defer cr.DeleteOrFail()
cr.create()
exutil.By(fmt.Sprintf("Created container runtime configs over the limit must report a failure regarding the %d configs limit", crsLimit))
expectedMsg := fmt.Sprintf("could not get ctrcfg key: max number of supported ctrcfgs (%d) has been reached. Please delete old ctrcfgs before retrying", crsLimit)
cr.waitUntilFailure(expectedMsg, "10s")
exutil.By("Created kubeletconfigs inside the limit must be successful")
for _, crItem := range createdCrs {
crItem.(*ContainerRuntimeConfig).waitUntilSuccess("10s")
}
exutil.By("Check that only the right machine configs were created")
	// Check all ContainerRuntimeConfigs, the ones created by this TC and the already existing ones too
allCrs := []ResourceInterface{}
allCrs = append(allCrs, createdCrs...)
for _, crItem := range crs {
key := crItem
allCrs = append(allCrs, &key)
}
allMcs := verifyRenderedMcs(oc, renderedCrConfigsSuffix, allCrs)
crCounter := 0
for _, mc := range allMcs {
if strings.HasPrefix(mc.name, "99-"+renderedCrConfigsSuffix) {
crCounter++
}
}
	o.Expect(crCounter).Should(o.Equal(crsLimit), "Only %d ContainerRuntimeConfig resources should be generated", crsLimit)
})
| |||||
test case
|
openshift/openshift-tests-private
|
a261f733-3144-414a-be56-1e07841487d1
|
Author:sregidor-Longduration-NonPreRelease-High-46314-[P2][OnCLayer] Incorrect file contents if compression field is specified [Serial]
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-Longduration-NonPreRelease-High-46314-[P2][OnCLayer] Incorrect file contents if compression field is specified [Serial]", func() {
exutil.By("Create a new MachineConfig to provision a config file in zipped format")
fileContent := `Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do
eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut
enim ad minim veniam, quis nostrud exercitation ullamco laboris
nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit esse cillum dolore eu fugiat
nulla pariatur. Excepteur sint occaecat cupidatat non proident,
sunt in culpa qui officia deserunt mollit anim id est laborum.
nulla pariatur.`
mcName := "99-gzip-test"
destPath := "/etc/test-file"
fileConfig := getGzipFileJSONConfig(destPath, fileContent)
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker)
defer mc.delete()
err := mc.Create("-p", "NAME="+mcName, "-p", "POOL=worker", "-p", fmt.Sprintf("FILES=[%s]", fileConfig))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Wait until worker MachineConfigPool has finished the configuration")
mcp := NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
mcp.waitForComplete()
exutil.By("Verfiy that the file has been properly provisioned")
node := NewNodeList(oc).GetAllLinuxWorkerNodesOrFail()[0]
rf := NewRemoteFile(node, destPath)
err = rf.Fetch()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(rf.GetTextContent()).To(o.Equal(fileContent))
o.Expect(rf.GetNpermissions()).To(o.Equal("0644"))
o.Expect(rf.GetUIDName()).To(o.Equal("root"))
o.Expect(rf.GetGIDName()).To(o.Equal("root"))
})
| |||||
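Test 46314 provisions the file through getGzipFileJSONConfig. The sketch below shows, as an assumption about the format rather than the actual helper, how a gzip-compressed Ignition-style file entry can be built: the content is gzipped, base64-encoded into a data URL, and marked with "compression":"gzip" so it is decompressed before being written to disk.

package main

import (
	"bytes"
	"compress/gzip"
	"encoding/base64"
	"fmt"
)

// gzipFileEntry builds a hypothetical Ignition-style file entry whose contents
// are gzip-compressed and base64-encoded in a data URL. Mode 420 is octal 0644.
func gzipFileEntry(path, content string) (string, error) {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write([]byte(content)); err != nil {
		return "", err
	}
	if err := zw.Close(); err != nil {
		return "", err
	}
	encoded := base64.StdEncoding.EncodeToString(buf.Bytes())
	return fmt.Sprintf(`{"path":%q,"contents":{"source":"data:;base64,%s","compression":"gzip"},"mode":420}`, path, encoded), nil
}

func main() {
	entry, err := gzipFileEntry("/etc/test-file", "Lorem ipsum dolor sit amet")
	if err != nil {
		panic(err)
	}
	fmt.Println(entry)
}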
test case
|
openshift/openshift-tests-private
|
557556ab-f02a-4b05-bb53-24bfe83a90af
|
Author:sregidor-NonHyperShiftHOST-High-46424-[OnCLayer] Check run level
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonHyperShiftHOST-High-46424-[OnCLayer] Check run level", func() {
exutil.By("Validate openshift-machine-config-operator run level")
mcoNs := NewResource(oc.AsAdmin(), "ns", MachineConfigNamespace)
runLevel := mcoNs.GetOrFail(`{.metadata.labels.openshift\.io/run-level}`)
logger.Debugf("Namespace definition:\n%s", mcoNs.PrettyString())
	o.Expect(runLevel).To(o.Equal(""), `openshift-machine-config-operator namespace should have the run-level label set to ""`)
exutil.By("Validate machine-config-operator SCC")
podsList := NewNamespacedResourceList(oc.AsAdmin(), "pods", mcoNs.name)
podsList.ByLabel("k8s-app=machine-config-operator")
mcoPods, err := podsList.GetAll()
o.Expect(err).NotTo(o.HaveOccurred())
logger.Infof("Validating that there is only one machine-config-operator pod")
o.Expect(mcoPods).To(o.HaveLen(1))
mcoPod := mcoPods[0]
scc := mcoPod.GetOrFail(`{.metadata.annotations.openshift\.io/scc}`)
logger.Infof("Validating that the operator pod has the right SCC")
logger.Debugf("Machine-config-operator pod definition:\n%s", mcoPod.PrettyString())
// on baremetal cluster, value of openshift.io/scc is nfs-provisioner, on AWS cluster it is hostmount-anyuid
o.Expect(scc).Should(o.SatisfyAny(o.Equal("hostmount-anyuid"), o.Equal("nfs-provisioner"), o.Equal("anyuid")),
`machine-config-operator pod is not using the right SCC`)
exutil.By("Validate machine-config-daemon clusterrole")
mcdCR := NewResource(oc.AsAdmin(), "clusterrole", "machine-config-daemon")
mcdRules := mcdCR.GetOrFail(`{.rules[?(@.apiGroups[0]=="security.openshift.io")]}`)
logger.Debugf("Machine-config-operator clusterrole definition:\n%s", mcdCR.PrettyString())
o.Expect(mcdRules).Should(o.ContainSubstring("privileged"),
`machine-config-daemon clusterrole has not the right configuration for ApiGroup "security.openshift.io"`)
exutil.By("Validate machine-config-server clusterrole")
mcsCR := NewResource(oc.AsAdmin(), "clusterrole", "machine-config-server")
mcsRules := mcsCR.GetOrFail(`{.rules[?(@.apiGroups[0]=="security.openshift.io")]}`)
logger.Debugf("Machine-config-server clusterrole definition:\n%s", mcdCR.PrettyString())
o.Expect(mcsRules).Should(o.ContainSubstring("hostnetwork"),
`machine-config-server clusterrole has not the right configuration for ApiGroup "security.openshift.io"`)
})
| ||||||
test case
|
openshift/openshift-tests-private
|
a230727e-c24f-4011-a8dd-50b1ef6b9abd
|
Author:sregidor-Longduration-NonPreRelease-High-46434-[OnCLayer] Mask service [Serial]
|
['"encoding/json"', '"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-Longduration-NonPreRelease-High-46434-[OnCLayer] Mask service [Serial]", func() {
activeString := "Active: active (running)"
inactiveString := "Active: inactive (dead)"
maskedString := "Loaded: masked"
exutil.By("Validate that the chronyd service is active")
workerNode := NewNodeList(oc).GetAllLinuxWorkerNodesOrFail()[0]
	svcOutput, err := workerNode.DebugNodeWithChroot("systemctl", "status", "chronyd")
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(svcOutput).Should(o.ContainSubstring(activeString))
	o.Expect(svcOutput).ShouldNot(o.ContainSubstring(inactiveString))
exutil.By("Create a MachineConfig resource to mask the chronyd service")
mcName := "99-test-mask-services"
maskSvcConfig := getMaskServiceConfig("chronyd.service", true)
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker)
defer mc.delete()
err = mc.Create("-p", "NAME="+mcName, "-p", "POOL=worker", "-p", fmt.Sprintf("UNITS=[%s]", maskSvcConfig))
o.Expect(err).NotTo(o.HaveOccurred())
	// If the service is masked but the node drain fails, unmask the chronyd service on all worker nodes in this defer block.
	// The cleanup logic will then delete this MC and the node will be rebooted; once the system is back online the chronyd
	// service can start automatically, and the unmask command can be executed without error on a loaded & active service.
defer func() {
workersNodes := NewNodeList(oc).GetAllLinuxWorkerNodesOrFail()
for _, worker := range workersNodes {
svcName := "chronyd"
_, err := worker.UnmaskService(svcName)
	// Just log the unmask result here, to make sure the unmask operation can be executed on all the worker nodes
if err != nil {
logger.Errorf("unmask %s failed on node %s: %v", svcName, worker.name, err)
} else {
logger.Infof("unmask %s success on node %s", svcName, worker.name)
}
}
}()
exutil.By("Wait until worker MachineConfigPool has finished the configuration")
mcp := NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
mcp.waitForComplete()
exutil.By("Validate that the chronyd service is masked")
	svcMaskedOutput, _ := workerNode.DebugNodeWithChroot("systemctl", "status", "chronyd")
	// Since the service is masked, the "systemctl status chronyd" command will return a value != 0 and an error will be reported
	// So we don't check the error, only the output
	o.Expect(svcMaskedOutput).ShouldNot(o.ContainSubstring(activeString))
	o.Expect(svcMaskedOutput).Should(o.ContainSubstring(inactiveString))
	o.Expect(svcMaskedOutput).Should(o.ContainSubstring(maskedString))
exutil.By("Patch the MachineConfig resource to unmaskd the svc")
// This part needs to be changed once we refactor MachineConfig to embed the Resource struct.
// We will use here the 'mc' object directly
mcresource := NewResource(oc.AsAdmin(), "mc", mc.name)
err = mcresource.Patch("json", `[{ "op": "replace", "path": "/spec/config/systemd/units/0/mask", "value": false}]`)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Wait until worker MachineConfigPool has finished the configuration")
mcp.waitForComplete()
exutil.By("Validate that the chronyd service is unmasked")
	svcUnMaskedOutput, err := workerNode.DebugNodeWithChroot("systemctl", "status", "chronyd")
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(svcUnMaskedOutput).Should(o.ContainSubstring(activeString))
	o.Expect(svcUnMaskedOutput).ShouldNot(o.ContainSubstring(inactiveString))
})
| |||||
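Test 46434 builds the unit entry with getMaskServiceConfig. A minimal sketch, assuming the helper renders a systemd unit entry like the one below (masking a unit links it to /dev/null so systemd can never start it):

// Hypothetical helper (assumption, not the repository's code): render a systemd unit
// entry suitable for the MachineConfig template's UNITS parameter. Requires the standard fmt package.
func maskServiceConfig(name string, mask bool) string {
	return fmt.Sprintf(`{"name":%q,"mask":%t}`, name, mask)
}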
test case
|
openshift/openshift-tests-private
|
4fd0d33f-6573-4629-b206-66cdad1fc08d
|
Author:sregidor-Longduration-NonPreRelease-High-46943-[P1][OnCLayer] Config Drift. Config file. [Serial]
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-Longduration-NonPreRelease-High-46943-[P1][OnCLayer] Config Drift. Config file. [Serial]", func() {
exutil.By("Create a MC to deploy a config file")
filePath := "/etc/mco-test-file"
fileContent := "MCO test file\n"
fileConfig := getURLEncodedFileConfig(filePath, fileContent, "")
mcName := "mco-drift-test-file"
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker)
defer mc.delete()
err := mc.Create("-p", "NAME="+mcName, "-p", "POOL=worker", "-p", fmt.Sprintf("FILES=[%s]", fileConfig))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Wait until worker MCP has finished the configuration. No machine should be degraded.")
mcp := NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
mcp.waitForComplete()
exutil.By("Verfiy file content and permissions")
workerNode := NewNodeList(oc).GetAllLinuxWorkerNodesOrFail()[0]
defaultMode := "0644"
rf := NewRemoteFile(workerNode, filePath)
rferr := rf.Fetch()
o.Expect(rferr).NotTo(o.HaveOccurred())
o.Expect(rf.GetTextContent()).To(o.Equal(fileContent))
o.Expect(rf.GetNpermissions()).To(o.Equal(defaultMode))
exutil.By("Verify drift config behavior")
defer func() {
_ = rf.PushNewPermissions(defaultMode)
_ = rf.PushNewTextContent(fileContent)
_ = mcp.WaitForNotDegradedStatus()
}()
newMode := "0400"
useForceFile := false
verifyDriftConfig(mcp, rf, newMode, useForceFile)
})
| |||||
test case
|
openshift/openshift-tests-private
|
1b32359c-a8fb-43fe-8fbb-217db1a69901
|
Author:rioliu-NonPreRelease-Longduration-High-46965-[P2] Avoid workload disruption for GPG Public Key Rotation [Serial]
|
['"time"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:rioliu-NonPreRelease-Longduration-High-46965-[P2] Avoid workload disruption for GPG Public Key Rotation [Serial]", func() {
exutil.By("create new machine config with base64 encoded gpg public key")
workerNode := NewNodeList(oc).GetAllLinuxWorkerNodesOrFail()[0]
startTime := workerNode.GetDateOrFail()
mcName := "add-gpg-pub-key"
mcTemplate := "add-gpg-pub-key.yaml"
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker).SetMCOTemplate(mcTemplate)
defer mc.delete()
mc.create()
exutil.By("checkout machine config daemon logs to verify ")
log, err := exutil.GetSpecificPodLogs(oc, MachineConfigNamespace, MachineConfigDaemon, workerNode.GetMachineConfigDaemon(), "")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(log).Should(o.ContainSubstring("/etc/machine-config-daemon/no-reboot/containers-gpg.pub"))
o.Expect(log).Should(o.ContainSubstring("Changes do not require drain, skipping"))
o.Expect(log).Should(o.MatchRegexp(MCDCrioReloadedRegexp))
o.Expect(workerNode.GetUptime()).Should(o.BeTemporally("<", startTime),
"The node %s must NOT be rebooted, but it was rebooted. Uptime date happened after the start config time.", workerNode.GetName())
exutil.By("verify crio.service status")
cmdOut, cmdErr := workerNode.DebugNodeWithChroot("systemctl", "is-active", "crio.service")
o.Expect(cmdErr).NotTo(o.HaveOccurred())
o.Expect(cmdOut).Should(o.ContainSubstring("active"))
})
| |||||
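The uptime assertion above encodes the no-reboot expectation: the node's last boot time must still be earlier than the timestamp captured before the MachineConfig was created. A trivial sketch of that comparison (hypothetical names, standard time package):

// nodeWasRebooted reports whether the node booted again after the MachineConfig
// was created, i.e. whether the change triggered a reboot.
func nodeWasRebooted(bootTime, startTime time.Time) bool {
	return bootTime.After(startTime)
}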
test case
|
openshift/openshift-tests-private
|
74318ebe-8266-44f5-967b-636f86ddc727
|
Author:rioliu-NonPreRelease-Longduration-High-47062-change policy.json on worker nodes [Serial]
|
['"encoding/json"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:rioliu-NonPreRelease-Longduration-High-47062-change policy.json on worker nodes [Serial]", func() {
exutil.By("create new machine config to change /etc/containers/policy.json")
workerNode := NewNodeList(oc).GetAllLinuxWorkerNodesOrFail()[0]
startTime := workerNode.GetDateOrFail()
mcName := "change-policy-json"
mcTemplate := "change-policy-json.yaml"
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker).SetMCOTemplate(mcTemplate)
defer mc.delete()
mc.create()
exutil.By("verify file content changes")
fileContent, fileErr := workerNode.DebugNodeWithChroot("cat", "/etc/containers/policy.json")
o.Expect(fileErr).NotTo(o.HaveOccurred())
logger.Infof(fileContent)
o.Expect(fileContent).Should(o.ContainSubstring(`{"default": [{"type": "insecureAcceptAnything"}]}`))
o.Expect(fileContent).ShouldNot(o.ContainSubstring("transports"))
exutil.By("checkout machine config daemon logs to make sure node drain/reboot are skipped")
log, err := exutil.GetSpecificPodLogs(oc, MachineConfigNamespace, MachineConfigDaemon, workerNode.GetMachineConfigDaemon(), "")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(log).Should(o.ContainSubstring("/etc/containers/policy.json"))
o.Expect(log).Should(o.ContainSubstring("Changes do not require drain, skipping"))
o.Expect(log).Should(o.MatchRegexp(MCDCrioReloadedRegexp))
o.Expect(workerNode.GetUptime()).Should(o.BeTemporally("<", startTime),
"The node %s must NOT be rebooted, but it was rebooted. Uptime date happened after the start config time.", workerNode.GetName())
exutil.By("verify crio.service status")
cmdOut, cmdErr := workerNode.DebugNodeWithChroot("systemctl", "is-active", "crio.service")
o.Expect(cmdErr).NotTo(o.HaveOccurred())
o.Expect(cmdOut).Should(o.ContainSubstring("active"))
})
| |||||
test case
|
openshift/openshift-tests-private
|
b239bb65-44fd-484c-be08-0fb46519e97e
|
Author:sregidor-Longduration-NonPreRelease-High-46999-[P1] Config Drift. Config file permissions. [Serial]
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-Longduration-NonPreRelease-High-46999-[P1] Config Drift. Config file permissions. [Serial]", func() {
exutil.By("Create a MC to deploy a config file")
filePath := "/etc/mco-test-file"
fileContent := "MCO test file\n"
fileMode := "0400" // decimal 256
fileConfig := getURLEncodedFileConfig(filePath, fileContent, fileMode)
mcName := "mco-drift-test-file-permissions"
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker)
defer mc.delete()
err := mc.Create("-p", "NAME="+mcName, "-p", "POOL=worker", "-p", fmt.Sprintf("FILES=[%s]", fileConfig))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Wait until worker MCP has finished the configuration. No machine should be degraded.")
mcp := NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
mcp.waitForComplete()
exutil.By("Verfiy file content and permissions")
workerNode := NewNodeList(oc).GetAllLinuxWorkerNodesOrFail()[0]
rf := NewRemoteFile(workerNode, filePath)
rferr := rf.Fetch()
o.Expect(rferr).NotTo(o.HaveOccurred())
o.Expect(rf.GetTextContent()).To(o.Equal(fileContent))
o.Expect(rf.GetNpermissions()).To(o.Equal(fileMode))
exutil.By("Verify drift config behavior")
defer func() {
_ = rf.PushNewPermissions(fileMode)
_ = rf.PushNewTextContent(fileContent)
_ = mcp.WaitForNotDegradedStatus()
}()
newMode := "0644"
useForceFile := true
verifyDriftConfig(mcp, rf, newMode, useForceFile)
})
| |||||
test case
|
openshift/openshift-tests-private
|
69067d50-718b-41a8-b40c-16e8c2032178
|
Author:sregidor-Longduration-NonPreRelease-High-47045-[P2] Config Drift. Compressed files. [Serial]
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-Longduration-NonPreRelease-High-47045-[P2] Config Drift. Compressed files. [Serial]", func() {
exutil.By("Create a MC to deploy a config file using compression")
filePath := "/etc/mco-compressed-test-file"
fileContent := "MCO test file\nusing compression"
fileConfig := getGzipFileJSONConfig(filePath, fileContent)
mcName := "mco-drift-test-compressed-file"
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker)
defer mc.delete()
err := mc.Create("-p", "NAME="+mcName, "-p", "POOL=worker", "-p", fmt.Sprintf("FILES=[%s]", fileConfig))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Wait until worker MCP has finished the configuration. No machine should be degraded.")
mcp := NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
mcp.waitForComplete()
exutil.By("Verfiy file content and permissions")
workerNode := NewNodeList(oc).GetAllLinuxWorkerNodesOrFail()[0]
rf := NewRemoteFile(workerNode, filePath)
rferr := rf.Fetch()
o.Expect(rferr).NotTo(o.HaveOccurred())
defaultMode := "0644"
o.Expect(rf.GetTextContent()).To(o.Equal(fileContent))
o.Expect(rf.GetNpermissions()).To(o.Equal(defaultMode))
exutil.By("Verfy drift config behavior")
defer func() {
_ = rf.PushNewPermissions(defaultMode)
_ = rf.PushNewTextContent(fileContent)
_ = mcp.WaitForNotDegradedStatus()
}()
newMode := "0400"
useForceFile := true
verifyDriftConfig(mcp, rf, newMode, useForceFile)
})
| |||||
test case
|
openshift/openshift-tests-private
|
c527f549-2077-4d82-8cb4-8ed80a25f2ce
|
Author:sregidor-Longduration-NonPreRelease-High-47008-Config Drift. Dropin file. [Serial]
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-Longduration-NonPreRelease-High-47008-Config Drift. Dropin file. [Serial]", func() {
exutil.By("Create a MC to deploy a unit with a dropin file")
dropinFileName := "10-chrony-drop-test.conf"
filePath := "/etc/systemd/system/chronyd.service.d/" + dropinFileName
fileContent := "[Service]\nEnvironment=\"FAKE_OPTS=fake-value\""
unitEnabled := true
unitName := "chronyd.service"
unitConfig := getDropinFileConfig(unitName, unitEnabled, dropinFileName, fileContent)
mcName := "drifted-dropins-test"
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker)
defer mc.delete()
err := mc.Create("-p", "NAME="+mcName, "-p", "POOL=worker", "-p", fmt.Sprintf("UNITS=[%s]", unitConfig))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Wait until worker MCP has finished the configuration. No machine should be degraded.")
mcp := NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
mcp.waitForComplete()
exutil.By("Verfiy file content and permissions")
workerNode := NewNodeList(oc).GetAllLinuxWorkerNodesOrFail()[0]
rf := NewRemoteFile(workerNode, filePath)
rferr := rf.Fetch()
o.Expect(rferr).NotTo(o.HaveOccurred())
defaultMode := "0644"
o.Expect(rf.GetTextContent()).To(o.Equal(fileContent))
o.Expect(rf.GetNpermissions()).To(o.Equal(defaultMode))
exutil.By("Verify drift config behavior")
defer func() {
_ = rf.PushNewPermissions(defaultMode)
_ = rf.PushNewTextContent(fileContent)
_ = mcp.WaitForNotDegradedStatus()
}()
newMode := "0400"
useForceFile := true
verifyDriftConfig(mcp, rf, newMode, useForceFile)
})
| |||||
test case
|
openshift/openshift-tests-private
|
6eac08dc-0456-4a85-9c95-f4be6c5e376c
|
Author:sregidor-Longduration-NonPreRelease-High-47009-Config Drift. New Service Unit. [Serial]
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-Longduration-NonPreRelease-High-47009-Config Drift. New Service Unit. [Serial]", func() {
exutil.By("Create a MC to deploy a unit.")
unitEnabled := true
unitName := "example.service"
filePath := "/etc/systemd/system/" + unitName
fileContent := "[Service]\nType=oneshot\nExecStart=/usr/bin/echo Hello from MCO test service\n\n[Install]\nWantedBy=multi-user.target"
unitConfig := getSingleUnitConfig(unitName, unitEnabled, fileContent)
mcName := "drifted-new-service-test"
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker)
defer mc.delete()
err := mc.Create("-p", "NAME="+mcName, "-p", "POOL=worker", "-p", fmt.Sprintf("UNITS=[%s]", unitConfig))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Wait until worker MCP has finished the configuration. No machine should be degraded.")
mcp := NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
mcp.waitForComplete()
exutil.By("Verfiy file content and permissions")
workerNode := NewNodeList(oc).GetAllLinuxWorkerNodesOrFail()[0]
rf := NewRemoteFile(workerNode, filePath)
rferr := rf.Fetch()
o.Expect(rferr).NotTo(o.HaveOccurred())
defaultMode := "0644"
o.Expect(rf.GetTextContent()).To(o.Equal(fileContent))
o.Expect(rf.GetNpermissions()).To(o.Equal(defaultMode))
exutil.By("Verfiy deployed unit")
unitStatus, _ := workerNode.GetUnitStatus(unitName)
	// since it is a one-shot "hello world" service the execution will end
	// after the hello message and the unit will become inactive. So we don't check the error code.
o.Expect(unitStatus).Should(
o.And(
o.ContainSubstring(unitName),
o.ContainSubstring("Active: inactive (dead)"),
o.ContainSubstring("Hello from MCO test service"),
o.ContainSubstring("example.service: Deactivated successfully.")))
exutil.By("Verify drift config behavior")
defer func() {
_ = rf.PushNewPermissions(defaultMode)
_ = rf.PushNewTextContent(fileContent)
_ = mcp.WaitForNotDegradedStatus()
}()
newMode := "0400"
useForceFile := true
verifyDriftConfig(mcp, rf, newMode, useForceFile)
})
| |||||
test case
|
openshift/openshift-tests-private
|
3e4c0298-1695-4510-8e15-3e5212e6d96d
|
Author:sregidor-Longduration-NonPreRelease-High-51381-[P1][OnCLayer] cordon node before node drain. OCP >= 4.11 [Serial]
|
['"fmt"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-Longduration-NonPreRelease-High-51381-[P1][OnCLayer] cordon node before node drain. OCP >= 4.11 [Serial]", func() {
exutil.By("Capture initial migration-controller logs")
ctrlerContainer := "machine-config-controller"
ctrlerPod, podsErr := getMachineConfigControllerPod(oc)
o.Expect(podsErr).NotTo(o.HaveOccurred())
o.Expect(ctrlerPod).NotTo(o.BeEmpty())
initialCtrlerLogs, initErr := exutil.GetSpecificPodLogs(oc, MachineConfigNamespace, ctrlerContainer, ctrlerPod, "")
o.Expect(initErr).NotTo(o.HaveOccurred())
exutil.By("Create a MC to deploy a config file")
fileMode := "0644" // decimal 420
filePath := "/etc/chrony.conf"
fileContent := "pool 0.rhel.pool.ntp.org iburst\ndriftfile /var/lib/chrony/drift\nmakestep 1.0 3\nrtcsync\nlogdir /var/log/chrony"
fileConfig := getBase64EncodedFileConfig(filePath, fileContent, fileMode)
mcName := "ztc-51381-change-workers-chrony-configuration"
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker)
defer mc.delete()
err := mc.Create("-p", "NAME="+mcName, "-p", "POOL=worker", "-p", fmt.Sprintf("FILES=[%s]", fileConfig))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check MCD logs to make sure that the node is cordoned before being drained")
mcp := NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
workerNode := mcp.GetSortedNodesOrFail()[0]
o.Eventually(workerNode.IsCordoned, mcp.estimateWaitDuration().String(), "20s").Should(o.BeTrue(), "Worker node must be cordoned")
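	// (?s) lets '.' match newlines, so a single regexp can match the cordon, eviction and completion
	// messages in order across multiple controller log lines.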
searchRegexp := fmt.Sprintf("(?s)%s: initiating cordon", workerNode.GetName())
if !workerNode.IsEdgeOrFail() {
	// Edge nodes are unschedulable, so no pods run on them and no eviction takes place
searchRegexp += fmt.Sprintf(".*node %s: Evicted pod", workerNode.GetName())
}
searchRegexp += fmt.Sprintf(".*node %s: operation successful; applying completion annotation", workerNode.GetName())
o.Eventually(func() string {
podAllLogs, _ := exutil.GetSpecificPodLogs(oc, MachineConfigNamespace, ctrlerContainer, ctrlerPod, "")
// Remove the part of the log captured at the beginning of the test.
// We only check the part of the log that this TC generates and ignore the previously generated logs
return strings.Replace(podAllLogs, initialCtrlerLogs, "", 1)
}, "5m", "10s").Should(o.MatchRegexp(searchRegexp), "Node should be cordoned before being drained")
exutil.By("Wait until worker MCP has finished the configuration. No machine should be degraded.")
mcp.waitForComplete()
exutil.By("Verfiy file content and permissions")
rf := NewRemoteFile(workerNode, filePath)
rferr := rf.Fetch()
o.Expect(rferr).NotTo(o.HaveOccurred())
o.Expect(rf.GetTextContent()).To(o.Equal(fileContent))
o.Expect(rf.GetNpermissions()).To(o.Equal(fileMode))
})
| |||||
test case
|
openshift/openshift-tests-private
|
868a5cb8-c02c-4a1e-ac1d-edc6e1ebeed2
|
Author:sregidor-Longduration-NonPreRelease-High-49568-[OnCLayer] Check nodes updating order maxUnavailable=1 [Serial]
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-Longduration-NonPreRelease-High-49568-[OnCLayer] Check nodes updating order maxUnavailable=1 [Serial]", func() {
mcp := NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
// In OCL nodes are scaled up using the original osImage, and then MCO applies an update on them
// To avoid problems we do not scale up new nodes if OCL is enabled
// Once OCL is able to boot the nodes directly with the right image we can scale up nodes if the pool is OCL
if exutil.OrFail[bool](WorkersCanBeScaled(oc.AsAdmin())) && !exutil.OrFail[bool](mcp.IsOCL()) {
exutil.By("Scale machinesets and 1 more replica to make sure we have at least 2 nodes per machineset")
platform := exutil.CheckPlatform(oc)
logger.Infof("Platform is %s", platform)
if platform != "none" && platform != "" {
err := AddToAllMachineSets(oc, 1)
o.Expect(err).NotTo(o.HaveOccurred())
defer func() { o.Expect(AddToAllMachineSets(oc, -1)).NotTo(o.HaveOccurred()) }()
} else {
logger.Infof("Platform is %s, skipping the MachineSets replica configuration", platform)
}
} else {
logger.Infof("The worker pool cannot be scaled using machinesets or it is OCL. Skip adding new nodes")
}
exutil.By("Get the nodes in the worker pool sorted by update order")
workerNodes, errGet := mcp.GetSortedNodes()
o.Expect(errGet).NotTo(o.HaveOccurred())
exutil.By("Create a MC to deploy a config file")
filePath := "/etc/TC-49568-mco-test-file-order"
fileContent := "MCO test file order\n"
fileMode := "0400" // decimal 256
fileConfig := getURLEncodedFileConfig(filePath, fileContent, fileMode)
mcName := "mco-test-file-order"
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker)
defer mc.delete()
err := mc.Create("-p", "NAME="+mcName, "-p", "POOL=worker", "-p", fmt.Sprintf("FILES=[%s]", fileConfig))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Poll the nodes sorted by the order they are updated")
maxUnavailable := 1
updatedNodes := mcp.GetSortedUpdatedNodes(maxUnavailable)
for _, n := range updatedNodes {
logger.Infof("updated node: %s created: %s zone: %s", n.GetName(), n.GetOrFail(`{.metadata.creationTimestamp}`), n.GetOrFail(`{.metadata.labels.topology\.kubernetes\.io/zone}`))
}
exutil.By("Wait for the configuration to be applied in all nodes")
mcp.waitForComplete()
exutil.By("Check that nodes were updated in the right order")
rightOrder := checkUpdatedLists(workerNodes, updatedNodes, maxUnavailable)
o.Expect(rightOrder).To(o.BeTrue(), "Expected update order %s, but found order %s", workerNodes, updatedNodes)
exutil.By("Verfiy file content and permissions")
rf := NewRemoteFile(workerNodes[0], filePath)
rferr := rf.Fetch()
o.Expect(rferr).NotTo(o.HaveOccurred())
o.Expect(rf.GetTextContent()).To(o.Equal(fileContent))
o.Expect(rf.GetNpermissions()).To(o.Equal(fileMode))
})
| |||||
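The ordering check above delegates to checkUpdatedLists. A rough sketch of the kind of windowed comparison involved, offered as an assumption about the helper's logic (plain node names are used for brevity): with maxUnavailable nodes allowed to update at once, the observed order may differ inside a batch, but every batch must only contain nodes from the corresponding window of the expected list.

// sameOrderInBatches is a hypothetical check: it splits both lists into windows
// of size maxUnavailable and verifies that each observed window only contains
// nodes expected in that window.
func sameOrderInBatches(expected, updated []string, maxUnavailable int) bool {
	if len(expected) != len(updated) {
		return false
	}
	for start := 0; start < len(expected); start += maxUnavailable {
		end := start + maxUnavailable
		if end > len(expected) {
			end = len(expected)
		}
		window := map[string]bool{}
		for _, name := range expected[start:end] {
			window[name] = true
		}
		for _, name := range updated[start:end] {
			if !window[name] {
				return false
			}
		}
	}
	return true
}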
test case
|
openshift/openshift-tests-private
|
33e01380-7dab-4ebc-a137-1291b9f77f76
|
Author:sregidor-Longduration-NonPreRelease-High-49672-[OnCLayer] Check nodes updating order maxUnavailable>1 [Serial]
|
['"fmt"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-Longduration-NonPreRelease-High-49672-[OnCLayer] Check nodes updating order maxUnavailable>1 [Serial]", func() {
mcp := NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
// In OCL nodes are scaled up using the original osImage, and then MCO applies an update on them
// To avoid problems we do not scale up new nodes if OCL is enabled
// Once OCL is able to boot the nodes directly with the right image we can scale up nodes if the pool is OCL
if exutil.OrFail[bool](WorkersCanBeScaled(oc.AsAdmin())) && !exutil.OrFail[bool](mcp.IsOCL()) {
exutil.By("Scale machinesets and 1 more replica to make sure we have at least 2 nodes per machineset")
platform := exutil.CheckPlatform(oc)
logger.Infof("Platform is %s", platform)
if platform != "none" && platform != "" {
err := AddToAllMachineSets(oc, 1)
o.Expect(err).NotTo(o.HaveOccurred())
defer func() { o.Expect(AddToAllMachineSets(oc, -1)).NotTo(o.HaveOccurred()) }()
} else {
logger.Infof("Platform is %s, skipping the MachineSets replica configuration", platform)
}
} else {
logger.Infof("The worker pool cannot be scaled using machinesets or it is OCL. Skip adding new nodes")
}
// If the number of nodes is 2, since we are using maxUnavailable=2, all nodes will be cordoned at
// the same time and the eviction process will be stuck. In this case we need to skip the test case.
numWorkers := len(NewNodeList(oc).GetAllLinuxWorkerNodesOrFail())
if numWorkers <= 2 {
g.Skip(fmt.Sprintf("The test case needs at least 3 worker nodes, because eviction will be stuck if not. Current num worker is %d, we skip the case",
numWorkers))
}
exutil.By("Get the nodes in the worker pool sorted by update order")
workerNodes, errGet := mcp.GetSortedNodes()
o.Expect(errGet).NotTo(o.HaveOccurred())
exutil.By("Set maxUnavailable value")
maxUnavailable := 2
mcp.SetMaxUnavailable(maxUnavailable)
defer mcp.RemoveMaxUnavailable()
exutil.By("Create a MC to deploy a config file")
filePath := "/etc/TC-49672-mco-test-file-order"
fileContent := "MCO test file order 2\n"
fileMode := "0400" // decimal 256
fileConfig := getURLEncodedFileConfig(filePath, fileContent, fileMode)
mcName := "mco-test-file-order2"
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker)
defer mc.delete()
err := mc.Create("-p", "NAME="+mcName, "-p", "POOL=worker", "-p", fmt.Sprintf("FILES=[%s]", fileConfig))
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Poll the nodes sorted by the order they are updated")
updatedNodes := mcp.GetSortedUpdatedNodes(maxUnavailable)
for _, n := range updatedNodes {
logger.Infof("updated node: %s created: %s zone: %s", n.GetName(), n.GetOrFail(`{.metadata.creationTimestamp}`), n.GetOrFail(`{.metadata.labels.topology\.kubernetes\.io/zone}`))
}
exutil.By("Wait for the configuration to be applied in all nodes")
mcp.waitForComplete()
exutil.By("Check that nodes were updated in the right order")
rightOrder := checkUpdatedLists(workerNodes, updatedNodes, maxUnavailable)
o.Expect(rightOrder).To(o.BeTrue(), "Expected update order %s, but found order %s", workerNodes, updatedNodes)
exutil.By("Verfiy file content and permissions")
rf := NewRemoteFile(workerNodes[0], filePath)
rferr := rf.Fetch()
o.Expect(rferr).NotTo(o.HaveOccurred())
o.Expect(rf.GetTextContent()).To(o.Equal(fileContent))
o.Expect(rf.GetNpermissions()).To(o.Equal(fileMode))
})
| |||||
test case
|
openshift/openshift-tests-private
|
aa1ae864-3c4b-49aa-a0a6-3f2417a9c595
|
Author:sregidor-NonPreRelease-Longduration-High-51219-[P1][OnCLayer] Check ClusterRole rules
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonPreRelease-Longduration-High-51219-[P1][OnCLayer] Check ClusterRole rules", func() {
expectedServiceAcc := MachineConfigDaemon
eventsRoleBinding := MachineConfigDaemonEvents
eventsClusterRole := MachineConfigDaemonEvents
daemonClusterRoleBinding := MachineConfigDaemon
daemonClusterRole := MachineConfigDaemon
exutil.By(fmt.Sprintf("Check %s service account", expectedServiceAcc))
serviceAccount := NewNamespacedResource(oc.AsAdmin(), "ServiceAccount", MachineConfigNamespace, expectedServiceAcc)
o.Expect(serviceAccount.Exists()).To(o.BeTrue(), "Service account %s should exist in namespace %s", expectedServiceAcc, MachineConfigNamespace)
exutil.By("Check service accounts in daemon pods")
checkNodePermissions := func(node Node) {
daemonPodName := node.GetMachineConfigDaemon()
logger.Infof("Checking permissions in daemon pod %s", daemonPodName)
daemonPod := NewNamespacedResource(node.oc, "pod", MachineConfigNamespace, daemonPodName)
o.Expect(daemonPod.GetOrFail(`{.spec.serviceAccount}`)).Should(o.Equal(expectedServiceAcc),
"Pod %s should use service account: %s", daemonPodName, expectedServiceAcc)
o.Expect(daemonPod.GetOrFail(`{.spec.serviceAccountName}`)).Should(o.Equal(expectedServiceAcc),
"Pod %s should use service account name: %s", daemonPodName, expectedServiceAcc)
}
nodes, err := NewNodeList(oc.AsAdmin()).GetAllLinux()
o.Expect(err).ShouldNot(o.HaveOccurred(), "Error getting the list of nodes")
for _, node := range nodes {
exutil.By(fmt.Sprintf("Checking node %s", node.GetName()))
checkNodePermissions(node)
}
exutil.By("Check events rolebindings in default namespace")
defaultEventsRoleBindings := NewNamespacedResource(oc.AsAdmin(), "RoleBinding", "default", "machine-config-daemon-events")
o.Expect(defaultEventsRoleBindings.Exists()).Should(o.BeTrue(), "'%s' Rolebinding not found in 'default' namespace", eventsRoleBinding)
// Check the bound SA
machineConfigSubject := JSON(defaultEventsRoleBindings.GetOrFail(fmt.Sprintf(`{.subjects[?(@.name=="%s")]}`, expectedServiceAcc)))
o.Expect(machineConfigSubject.ToMap()).Should(o.HaveKeyWithValue("name", expectedServiceAcc),
"'%s' in 'default' namespace should bind %s SA in namespace %s", eventsRoleBinding, expectedServiceAcc, MachineConfigNamespace)
o.Expect(machineConfigSubject.ToMap()).Should(o.HaveKeyWithValue("namespace", MachineConfigNamespace),
"'%s' in 'default' namespace should bind %s SA in namespace %s", eventsRoleBinding, expectedServiceAcc, MachineConfigNamespace)
// Check the ClusterRole
machineConfigClusterRole := JSON(defaultEventsRoleBindings.GetOrFail(`{.roleRef}`))
o.Expect(machineConfigClusterRole.ToMap()).Should(o.HaveKeyWithValue("kind", "ClusterRole"),
"'%s' in 'default' namespace should bind a ClusterRole", eventsRoleBinding)
o.Expect(machineConfigClusterRole.ToMap()).Should(o.HaveKeyWithValue("name", eventsClusterRole),
"'%s' in 'default' namespace should bind %s ClusterRole", eventsRoleBinding, eventsClusterRole)
exutil.By(fmt.Sprintf("Check events rolebindings in %s namespace", MachineConfigNamespace))
mcoEventsRoleBindings := NewNamespacedResource(oc.AsAdmin(), "RoleBinding", MachineConfigNamespace, "machine-config-daemon-events")
o.Expect(defaultEventsRoleBindings.Exists()).Should(o.BeTrue(), "'%s' Rolebinding not found in '%s' namespace", eventsRoleBinding, MachineConfigNamespace)
// Check the bound SA
machineConfigSubject = JSON(mcoEventsRoleBindings.GetOrFail(fmt.Sprintf(`{.subjects[?(@.name=="%s")]}`, expectedServiceAcc)))
o.Expect(machineConfigSubject.ToMap()).Should(o.HaveKeyWithValue("name", expectedServiceAcc),
"'%s' in '%s' namespace should bind %s SA in namespace %s", eventsRoleBinding, MachineConfigNamespace, expectedServiceAcc, MachineConfigNamespace)
o.Expect(machineConfigSubject.ToMap()).Should(o.HaveKeyWithValue("namespace", MachineConfigNamespace),
"'%s' in '%s' namespace should bind %s SA in namespace %s", eventsRoleBinding, MachineConfigNamespace, expectedServiceAcc, MachineConfigNamespace)
// Check the ClusterRole
machineConfigClusterRole = JSON(mcoEventsRoleBindings.GetOrFail(`{.roleRef}`))
o.Expect(machineConfigClusterRole.ToMap()).Should(o.HaveKeyWithValue("kind", "ClusterRole"),
"'%s' in '%s' namespace should bind a ClusterRole", eventsRoleBinding, MachineConfigNamespace)
o.Expect(machineConfigClusterRole.ToMap()).Should(o.HaveKeyWithValue("name", eventsClusterRole),
"'%s' in '%s' namespace should bind %s CLusterRole", eventsRoleBinding, MachineConfigNamespace, eventsClusterRole)
exutil.By(fmt.Sprintf("Check MCO cluseterrolebindings in %s namespace", MachineConfigNamespace))
mcoCRB := NewResource(oc.AsAdmin(), "ClusterRoleBinding", daemonClusterRoleBinding)
o.Expect(mcoCRB.Exists()).Should(o.BeTrue(), "'%s' ClusterRolebinding not found.", daemonClusterRoleBinding)
// Check the bound SA
machineConfigSubject = JSON(mcoCRB.GetOrFail(fmt.Sprintf(`{.subjects[?(@.name=="%s")]}`, expectedServiceAcc)))
o.Expect(machineConfigSubject.ToMap()).Should(o.HaveKeyWithValue("name", expectedServiceAcc),
"'%s' ClusterRoleBinding should bind %s SA in namespace %s", daemonClusterRoleBinding, expectedServiceAcc, MachineConfigNamespace)
o.Expect(machineConfigSubject.ToMap()).Should(o.HaveKeyWithValue("namespace", MachineConfigNamespace),
"'%s' ClusterRoleBinding should bind %s SA in namespace %s", daemonClusterRoleBinding, expectedServiceAcc, MachineConfigNamespace)
// Check the ClusterRole
machineConfigClusterRole = JSON(mcoCRB.GetOrFail(`{.roleRef}`))
o.Expect(machineConfigClusterRole.ToMap()).Should(o.HaveKeyWithValue("kind", "ClusterRole"),
"'%s' ClusterRoleBinding should bind a ClusterRole", daemonClusterRoleBinding)
o.Expect(machineConfigClusterRole.ToMap()).Should(o.HaveKeyWithValue("name", daemonClusterRole),
"'%s' ClusterRoleBinding should bind %s CLusterRole", daemonClusterRoleBinding, daemonClusterRole)
exutil.By("Check events clusterrole")
eventsCR := NewResource(oc.AsAdmin(), "ClusterRole", eventsClusterRole)
o.Expect(eventsCR.Exists()).To(o.BeTrue(), "ClusterRole %s should exist", eventsClusterRole)
stringRules := eventsCR.GetOrFail(`{.rules}`)
o.Expect(stringRules).ShouldNot(o.ContainSubstring("pod"),
"ClusterRole %s should grant no pod permissions at all", eventsClusterRole)
rules := JSON(stringRules)
for _, rule := range rules.Items() {
describesEvents := false
resources := rule.Get("resources")
for _, resource := range resources.Items() {
if resource.ToString() == "events" {
describesEvents = true
}
}
if describesEvents {
verbs := rule.Get("verbs").ToList()
o.Expect(verbs).Should(o.ContainElement("create"), "In ClusterRole %s 'events' rule should have 'create' permissions", eventsClusterRole)
o.Expect(verbs).Should(o.ContainElement("patch"), "In ClusterRole %s 'events' rule should have 'patch' permissions", eventsClusterRole)
	o.Expect(verbs).Should(o.HaveLen(2), "In ClusterRole %s 'events' rule should ONLY have 'create' and 'patch' permissions", eventsClusterRole)
}
}
exutil.By("Check daemon clusterrole")
daemonCR := NewResource(oc.AsAdmin(), "ClusterRole", daemonClusterRole)
stringRules = daemonCR.GetOrFail(`{.rules}`)
o.Expect(stringRules).ShouldNot(o.ContainSubstring("pod"),
"ClusterRole %s should grant no pod permissions at all", daemonClusterRole)
o.Expect(stringRules).ShouldNot(o.ContainSubstring("daemonsets"),
"ClusterRole %s should grant no daemonsets permissions at all", daemonClusterRole)
rules = JSON(stringRules)
for _, rule := range rules.Items() {
describesNodes := false
resources := rule.Get("resources")
for _, resource := range resources.Items() {
if resource.ToString() == "nodes" {
describesNodes = true
}
}
if describesNodes {
verbs := rule.Get("verbs").ToList()
o.Expect(verbs).Should(o.ContainElement("get"), "In ClusterRole %s 'nodes' rule should have 'get' permissions", daemonClusterRole)
o.Expect(verbs).Should(o.ContainElement("list"), "In ClusterRole %s 'nodes' rule should have 'list' permissions", daemonClusterRole)
o.Expect(verbs).Should(o.ContainElement("watch"), "In ClusterRole %s 'nodes' rule should have 'watch' permissions", daemonClusterRole)
	o.Expect(verbs).Should(o.HaveLen(3), "In ClusterRole %s 'nodes' rule should ONLY have 'get', 'list' and 'watch' permissions", daemonClusterRole)
}
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
6940fa9b-457d-4fe7-b806-9fc9e69259d3
|
Author:sregidor-NonPreRelease-Longduration-Medium-52373-[P2][OnCLayer] Modify proxy configuration in paused pools [Disruptive]
|
['"encoding/json"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonPreRelease-Longduration-Medium-52373-[P2][OnCLayer] Modify proxy configuration in paused pools [Disruptive]", func() {
proxyValue := "http://user:pass@proxy-fake:1111"
noProxyValue := "test.52373.no-proxy.com"
exutil.By("Get current proxy configuration")
proxy := NewResource(oc.AsAdmin(), "proxy", "cluster")
proxyInitialConfig := proxy.GetOrFail(`{.spec}`)
logger.Infof("Initial proxy configuration: %s", proxyInitialConfig)
wmcp := NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
mmcp := NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)
defer func() {
logger.Infof("Start TC defer block")
logger.Infof("Restore original proxy config %s", proxyInitialConfig)
_ = proxy.Patch("json", `[{ "op": "add", "path": "/spec", "value": `+proxyInitialConfig+`}]`)
logger.Infof("Wait for new machine configs to be rendered and paused pools to report updated status")
	// We need to make sure that the config is NOT applied, since the proxy is a fake one. If we don't
	// make sure that the proxy config is reverted, the nodes will break and go into
	// NotReady status
_ = wmcp.WaitForUpdatedStatus()
_ = mmcp.WaitForUpdatedStatus()
logger.Infof("Unpause worker pool")
wmcp.pause(false)
logger.Infof("Unpause master pool")
mmcp.pause(false)
logger.Infof("End TC defer block")
}()
exutil.By("Pause MCPs")
wmcp.pause(true)
mmcp.pause(true)
exutil.By("Configure new proxy")
err := proxy.Patch("json",
`[{ "op": "add", "path": "/spec/httpProxy", "value": "`+proxyValue+`" }]`)
o.Expect(err).ShouldNot(o.HaveOccurred(), "Error patching http proxy")
err = proxy.Patch("json",
`[{ "op": "add", "path": "/spec/httpsProxy", "value": "`+proxyValue+`" }]`)
o.Expect(err).ShouldNot(o.HaveOccurred(), "Error patching https proxy")
err = proxy.Patch("json",
`[{ "op": "add", "path": "/spec/noProxy", "value": "`+noProxyValue+`" }]`)
o.Expect(err).ShouldNot(o.HaveOccurred(), "Error patching noproxy")
exutil.By("Verify that the proxy configuration was applied to daemonsets")
mcoDs := NewNamespacedResource(oc.AsAdmin(), "DaemonSet", MachineConfigNamespace, "machine-config-daemon")
// It should never take longer than 5 minutes to apply the proxy config under any circumstance;
// if it does, it should be considered a bug.
o.Eventually(mcoDs.Poll(`{.spec}`), "5m", "30s").Should(o.ContainSubstring(proxyValue),
"machine-config-daemon is not using the new proxy configuration: %s", proxyValue)
o.Eventually(mcoDs.Poll(`{.spec}`), "5m", "30s").Should(o.ContainSubstring(noProxyValue),
"machine-config-daemon is not using the new no-proxy value: %s", noProxyValue)
exutil.By("Check that the operator has been marked as degraded")
mco := NewResource(oc.AsAdmin(), "co", "machine-config")
o.Eventually(mco.Poll(`{.status.conditions[?(@.type=="Degraded")].status}`),
"5m", "30s").Should(o.Equal("True"),
"machine-config Operator should report degraded status")
o.Eventually(mco.Poll(`{.status.conditions[?(@.type=="Degraded")].message}`),
"5m", "30s").Should(o.ContainSubstring(`required MachineConfigPool master is paused and cannot sync until it is unpaused`),
"machine-config Operator is not reporting the right reason for degraded status")
exutil.By("Restore original proxy configuration")
err = proxy.Patch("json", `[{ "op": "add", "path": "/spec", "value": `+proxyInitialConfig+`}]`)
o.Expect(err).ShouldNot(o.HaveOccurred(), "Error patching and restoring original proxy config")
exutil.By("Verify that the new configuration is applied to the daemonset")
// It should never take longer than 5 minutes to apply the proxy config under any circumstance;
// if it does, it should be considered a bug.
o.Eventually(mcoDs.Poll(`{.spec}`), "5m", "30s").ShouldNot(o.ContainSubstring(proxyValue),
"machine-config-daemon has not restored the original proxy configuration")
o.Eventually(mcoDs.Poll(`{.spec}`), "5m", "30s").ShouldNot(o.ContainSubstring(noProxyValue),
"machine-config-daemon has not restored the original proxy configuration for 'no-proxy'")
exutil.By("Check that the operator is not marked as degraded anymore")
o.Eventually(mco.Poll(`{.status.conditions[?(@.type=="Degraded")].status}`),
"5m", "30s").Should(o.Equal("False"),
"machine-config Operator should not report degraded status anymore")
})
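The proxy changes in this test are driven by RFC 6902 JSON patch documents. Below is a minimal standalone sketch (not part of the test suite; the proxy value is illustrative) of how such a patch can be built and validated with encoding/json before it is sent with a client or with oc patch --type=json:

package main

import (
	"encoding/json"
	"fmt"
)

type jsonPatchOp struct {
	Op    string `json:"op"`
	Path  string `json:"path"`
	Value string `json:"value"`
}

func main() {
	proxyValue := "http://user:pass@proxy-fake:1111" // illustrative placeholder
	patch := []jsonPatchOp{{Op: "add", Path: "/spec/httpProxy", Value: proxyValue}}

	// Marshalling guarantees the patch is well-formed JSON before it reaches the API server.
	raw, err := json.Marshal(patch)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(raw)) // [{"op":"add","path":"/spec/httpProxy","value":"http://user:pass@proxy-fake:1111"}]
}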
| |||||
test case
|
openshift/openshift-tests-private
|
26fbf4c8-7a31-4109-a295-ebdaccf58aba
|
Author:sregidor-NonPreRelease-Longduration-Medium-52520-Configure unqualified-search-registries in Image.config resource [Disruptive]
|
['"encoding/json"', '"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonPreRelease-Longduration-Medium-52520-Configure unqualified-search-registries in Image.config resource [Disruptive]", func() {
expectedDropinFilePath := "/etc/containers/registries.conf.d/01-image-searchRegistries.conf"
expectedDropinContent := "unqualified-search-registries = [\"quay.io\"]\nshort-name-mode = \"\"\n"
exutil.By("Get current image.config cluster configuration")
ic := NewResource(oc.AsAdmin(), "image.config", "cluster")
icInitialConfig := ic.GetOrFail(`{.spec}`)
logger.Infof("Initial image.config cluster configuration: %s", icInitialConfig)
wmcp := NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
mmcp := NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)
workers, wsErr := wmcp.GetSortedNodes()
o.Expect(wsErr).ShouldNot(o.HaveOccurred(), "Error getting the nodes in worker pool")
masters, msErr := mmcp.GetSortedNodes()
o.Expect(msErr).ShouldNot(o.HaveOccurred(), "Error getting the nodes in master pool")
firstUpdatedWorker := workers[0]
firstUpdatedMaster := masters[0]
defer func() {
logger.Infof("Start TC defer block")
logger.Infof("Restore original image.config cluster config %s", icInitialConfig)
_ = ic.Patch("json", `[{ "op": "add", "path": "/spec", "value": `+icInitialConfig+`}]`)
logger.Infof("Wait for the original configuration to be applied")
wmcp.waitForComplete()
mmcp.waitForComplete()
logger.Infof("End TC defer block")
}()
exutil.By("Add quay.io to unqualified-search-regisitries list in image.config cluster resource")
startTime, dErr := firstUpdatedMaster.GetDate()
o.Expect(dErr).ShouldNot(o.HaveOccurred(), "Error getting date in node %s", firstUpdatedMaster.GetName())
o.Expect(firstUpdatedWorker.IgnoreEventsBeforeNow()).NotTo(o.HaveOccurred(),
"Error getting the last event in node %s", firstUpdatedWorker.GetName())
o.Expect(firstUpdatedMaster.IgnoreEventsBeforeNow()).NotTo(o.HaveOccurred(),
"Error getting the last event in node %s", firstUpdatedMaster.GetName())
patchErr := ic.Patch("merge", `{"spec": {"registrySources": {"containerRuntimeSearchRegistries":["quay.io"]}}}`)
o.Expect(patchErr).ShouldNot(o.HaveOccurred(), "Error while patching the image.config cluster resource")
exutil.By("Wait for first nodes to be configured")
// Worker and master nodes should go into 'working' status
o.Eventually(firstUpdatedWorker.IsUpdating, "8m", "20s").Should(o.BeTrue(),
"Node %s is not in 'working' status after the new image.conig is configured", firstUpdatedWorker.GetName())
o.Eventually(firstUpdatedMaster.IsUpdating, "8m", "20s").Should(o.BeTrue(),
"Node %s is not in 'working' status after the new image.conig is configured", firstUpdatedMaster.GetName())
// We don't actually wait for the whole configuration to be applied,
// we only wait for those two nodes to be updated.
// Not waiting for the MCPs to finish the configuration makes this test case faster.
// If it causes instability, just wait here for the MCPs to complete the configuration instead.
// Worker and master nodes should go into 'Done' status
o.Eventually(firstUpdatedWorker.IsUpdated, "10m", "20s").Should(o.BeTrue(),
"Node %s is not in 'Done' status after the configuration is applied", firstUpdatedWorker.GetName())
o.Eventually(firstUpdatedMaster.IsUpdated, "10m", "20s").Should(o.BeTrue(),
"Node %s is not in 'Done' status after the configuration is applied", firstUpdatedMaster.GetName())
exutil.By("Print all events for the verified worker node")
el := NewEventList(oc.AsAdmin(), MachineConfigNamespace)
el.ByFieldSelector(`involvedObject.name=` + firstUpdatedWorker.GetName())
events, _ := el.GetAll()
printString := ""
for _, event := range events {
printString += fmt.Sprintf("- %s\n", event)
}
logger.Infof("All events for node %s:\n%s", firstUpdatedWorker.GetName(), printString)
logger.Infof("OK!\n")
exutil.By("Verify that a drain and reboot events were triggered for worker node")
wEvents, weErr := firstUpdatedWorker.GetEvents()
logger.Infof("All events for node %s since: %s", firstUpdatedWorker.GetName(), firstUpdatedWorker.eventCheckpoint)
for _, event := range wEvents {
logger.Infof("- %s", event)
}
o.Expect(weErr).ShouldNot(o.HaveOccurred(), "Error getting events for node %s", firstUpdatedWorker.GetName())
o.Expect(wEvents).To(HaveEventsSequence("Drain", "Reboot"),
"Error, the expected sequence of events is not found in node %s", firstUpdatedWorker.GetName())
exutil.By("Verify that a drain and reboot events were triggered for master node")
mEvents, meErr := firstUpdatedMaster.GetEvents()
o.Expect(meErr).ShouldNot(o.HaveOccurred(), "Error getting drain events for node %s", firstUpdatedMaster.GetName())
o.Expect(mEvents).To(HaveEventsSequence("Drain", "Reboot"),
"Error, the expected sequence of events is not found in node %s", firstUpdatedWorker.GetName())
exutil.By("Verify that the node was actually rebooted")
o.Expect(firstUpdatedWorker.GetUptime()).Should(o.BeTemporally(">", startTime),
"The node %s should have been rebooted after the configurion. Uptime didnt happen after start config time.")
o.Expect(firstUpdatedMaster.GetUptime()).Should(o.BeTemporally(">", startTime),
"The node %s should have been rebooted after the configurion. Uptime didnt happen after start config time.")
exutil.By("Verify dropin file's content in worker node")
wdropinFile := NewRemoteFile(firstUpdatedWorker, expectedDropinFilePath)
wfetchErr := wdropinFile.Fetch()
o.Expect(wfetchErr).ShouldNot(o.HaveOccurred(), "Error getting the content of file %s in node %s",
expectedDropinFilePath, firstUpdatedWorker.GetName())
o.Expect(wdropinFile.GetTextContent()).Should(o.Equal(expectedDropinContent))
exutil.By("Verify dropin file's content in master node")
mdropinFile := NewRemoteFile(firstUpdatedMaster, expectedDropinFilePath)
mfetchErr := mdropinFile.Fetch()
o.Expect(mfetchErr).ShouldNot(o.HaveOccurred(), "Error getting the content of file %s in node %s",
expectedDropinFilePath, firstUpdatedMaster.GetName())
o.Expect(mdropinFile.GetTextContent()).Should(o.Equal(expectedDropinContent))
})
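The reboot verification in this test compares the node's boot time against a timestamp captured before the change was applied. A minimal sketch of that comparison with illustrative times (the real values come from the GetDate and GetUptime helpers):

package main

import (
	"fmt"
	"time"
)

// wasRebooted mirrors the idea behind o.BeTemporally(">", startTime): a node is considered
// rebooted when its boot time is later than the timestamp captured before the change.
func wasRebooted(bootTime, configStart time.Time) bool {
	return bootTime.After(configStart)
}

func main() {
	configStart := time.Now()
	bootTime := configStart.Add(3 * time.Minute) // illustrative: the node booted after the change
	fmt.Println(wasRebooted(bootTime, configStart)) // true
}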
| |||||
test case
|
openshift/openshift-tests-private
|
6dff3573-cbbf-402e-b615-4c8da1ed213c
|
Author:rioliu-NonPreRelease-Longduration-High-53668-[P1] when FIPS and realtime kernel are both enabled node should NOT be degraded [Disruptive]
|
['"github.com/openshift/openshift-tests-private/test/extended/util/architecture"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:rioliu-NonPreRelease-Longduration-High-53668-[P1] when FIPS and realtime kernel are both enabled node should NOT be degraded [Disruptive]", func() {
// skip if arm64. realtime kernel is not supported.
architecture.SkipNonAmd64SingleArch(oc)
// skip the test if fips is not enabled
skipTestIfFIPSIsNotEnabled(oc)
// skip the test if platform is not aws or gcp. realtime kargs currently supported on these platforms
skipTestIfSupportedPlatformNotMatched(oc, AWSPlatform, GCPPlatform)
exutil.By("create machine config to enable fips ")
fipsMcName := "50-fips-bz-poc"
fipsMcTemplate := "bz2096496-dummy-mc-for-fips.yaml"
fipsMc := NewMachineConfig(oc.AsAdmin(), fipsMcName, MachineConfigPoolMaster).SetMCOTemplate(fipsMcTemplate)
defer fipsMc.delete()
fipsMc.create()
exutil.By("create machine config to enable RT kernel")
rtMcName := "50-realtime-kernel"
rtMcTemplate := "set-realtime-kernel.yaml"
rtMc := NewMachineConfig(oc.AsAdmin(), rtMcName, MachineConfigPoolMaster).SetMCOTemplate(rtMcTemplate)
// TODO: When we extract the "mcp.waitForComplete" from the "create" and "delete" methods, we need to take into account that if
// we are configuring a rt-kernel we need to wait longer.
defer rtMc.delete()
rtMc.create()
masterNode := NewNodeList(oc).GetAllMasterNodesOrFail()[0]
exutil.By("check whether fips is enabled")
fipsEnabled, fipsErr := masterNode.IsFIPSEnabled()
o.Expect(fipsErr).NotTo(o.HaveOccurred())
o.Expect(fipsEnabled).Should(o.BeTrue(), "fips is not enabled on node %s", masterNode.GetName())
exutil.By("check whether fips related kernel arg is enabled")
fipsKarg := "trigger-fips-issue=1"
fipsKargEnabled, fipsKargErr := masterNode.IsKernelArgEnabled(fipsKarg)
o.Expect(fipsKargErr).NotTo(o.HaveOccurred())
o.Expect(fipsKargEnabled).Should(o.BeTrue(), "fips related kernel arg %s is not enabled on node %s", fipsKarg, masterNode.GetName())
exutil.By("check whether RT kernel is enabled")
rtEnabled, rtErr := masterNode.IsKernelArgEnabled("PREEMPT_RT")
o.Expect(rtErr).NotTo(o.HaveOccurred())
o.Expect(rtEnabled).Should(o.BeTrue(), "RT kernel is not enabled on node %s", masterNode.GetName())
})
| |||||
test case
|
openshift/openshift-tests-private
|
34111c7f-697e-4908-9a05-9b5cae2933ad
|
Author:sregidor-NonPreRelease-Longduration-Critical-53960-[P2][OnCLayer] No failed units in the bootstrap machine
|
['bootstrap "github.com/openshift/openshift-tests-private/test/extended/util/bootstrap"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonPreRelease-Longduration-Critical-53960-[P2][OnCLayer] No failed units in the bootstrap machine", func() {
skipTestIfSupportedPlatformNotMatched(oc, AWSPlatform, AzurePlatform)
failedUnitsCommand := "sudo systemctl list-units --failed --all"
// If no bootstrap is found, we skip the case.
// The test can only be executed in deployments that didn't remove the bootstrap machine
bs, err := bootstrap.GetBootstrap(oc)
if err != nil {
if _, notFound := err.(*bootstrap.InstanceNotFound); notFound {
g.Skip("skip test because bootstrap machine does not exist in the current cluster")
}
}
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Verify that there is no failed units in the bootstrap machine")
// ssh client is a bit unstable, and it can return an empty string for no apparent reason every now and then.
// Hence we use 'Eventually' to verify the command to make the test robust.
o.Eventually(func() string {
logger.Infof("Executing command in bootstrap: %s", failedUnitsCommand)
failedUnits, err := bs.SSH.RunOutput(failedUnitsCommand)
logger.Infof("Command output:\n%s", failedUnits)
if err != nil {
logger.Errorf("Command Error:\n%s", err)
}
return failedUnits
}).Should(o.ContainSubstring("0 loaded units listed"),
"There are failed units in the bootstrap machine")
})
| |||||
test case
|
openshift/openshift-tests-private
|
e8a2cd5d-f9a1-4f4e-a2e3-ceda56086154
|
Author:sregidor-NonPreRelease-Longduration-Medium-72129-[OnCLayer] Don't allow creating the force file via MachineConfig [Disruptive]
|
['"fmt"', '"regexp"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonPreRelease-Longduration-Medium-72129-[OnCLayer] Don't allow creating the force file via MachineConfig [Disruptive]", func() {
var (
filePath = "/run/machine-config-daemon-force"
fileContent = ""
fileMode = "0420" // decimal 272
fileConfig = getURLEncodedFileConfig(filePath, fileContent, fileMode)
mcName = "mco-tc-55879-create-force-file"
mcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
expectedRDMessage = regexp.QuoteMeta(fmt.Sprintf("cannot create %s via Ignition", filePath)) // QuoteMeta to escape regex characters in the file path
expectedRDReason = ""
)
exutil.By("Create the force file using a MC")
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker)
mc.parameters = []string{fmt.Sprintf("FILES=[%s]", fileConfig)}
mc.skipWaitForMcp = true
validateMcpRenderDegraded(mc, mcp, expectedRDMessage, expectedRDReason)
})
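The degraded-message check relies on regexp.QuoteMeta so that the literal file path can be embedded safely in a regular expression. A small standalone sketch, using an illustrative degraded message rather than real controller output:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	filePath := "/run/machine-config-daemon-force"
	// QuoteMeta escapes any regex metacharacters in the literal expected message.
	expected := regexp.QuoteMeta(fmt.Sprintf("cannot create %s via Ignition", filePath))

	// Illustrative degraded message; the real one is produced by the render controller.
	sample := "Degraded: cannot create /run/machine-config-daemon-force via Ignition"
	fmt.Println(regexp.MustCompile(expected).MatchString(sample)) // true
}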
| |||||
test case
|
openshift/openshift-tests-private
|
4bbee12d-5424-4785-9e20-a717dceea998
|
Author:rioliu-NonHyperShiftHOST-Medium-54937-[P1][OnCLayer] logs and events are flood with clusterrole and clusterrolebinding [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:rioliu-NonHyperShiftHOST-Medium-54937-[P1][OnCLayer] logs and events are flood with clusterrole and clusterrolebinding [Disruptive]", func() {
exutil.By("get machine-config-operator pod name")
mcoPod, getMcoPodErr := getMachineConfigOperatorPod(oc)
o.Expect(getMcoPodErr).NotTo(o.HaveOccurred(), "get mco pod failed")
if exutil.CheckPlatform(oc) == "vsphere" { // check platformStatus.VSphere related log on vsphere clusters only
exutil.By("check infra/cluster info, make sure platformStatus.VSphere does not exist")
infra := NewResource(oc.AsAdmin(), "infrastructure", "cluster")
vsphereStatus, getStatusErr := infra.Get(`{.status.platformStatus.VSphere}`)
o.Expect(getStatusErr).NotTo(o.HaveOccurred(), "check vsphere status failed")
// check whether vsphereStatus exists; the logs are only checked when it does not exist, otherwise this check is skipped
if vsphereStatus == "" {
exutil.By("check vsphere related log in machine-config-operator pod")
filteredVsphereLog, filterVsphereLogErr := exutil.GetSpecificPodLogs(oc, MachineConfigNamespace, MachineConfigOperator, mcoPod, "PlatformStatus.VSphere")
// if no platformStatus.VSphere log is found, the function returns an error, which is expected
logger.Debugf("filtered vsphere log:\n %s", filteredVsphereLog)
o.Expect(filterVsphereLogErr).Should(o.HaveOccurred(), "found vsphere related log in mco pod")
}
}
// check below logs for all platforms
exutil.By("check clusterrole and clusterrolebinding related logs in machine-config-operator pod")
filteredClusterRoleLog, filterClusterRoleLogErr := exutil.GetSpecificPodLogs(oc, MachineConfigNamespace, MachineConfigOperator, mcoPod, "ClusterRoleUpdated")
logger.Debugf("filtered clusterrole log:\n %s", filteredClusterRoleLog)
o.Expect(filterClusterRoleLogErr).Should(o.HaveOccurred(), "found ClusterRoleUpdated log in mco pod")
filteredClusterRoleBindingLog, filterClusterRoleBindingLogErr := exutil.GetSpecificPodLogs(oc, MachineConfigNamespace, MachineConfigOperator, mcoPod, "ClusterRoleBindingUpdated")
logger.Debugf("filtered clusterrolebinding log:\n %s", filteredClusterRoleBindingLog)
o.Expect(filterClusterRoleBindingLogErr).Should(o.HaveOccurred(), "found ClusterRoleBindingUpdated log in mco pod")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
1e1950b1-61eb-4dd9-ab0b-0f739dac171a
|
Author:sregidor-NonPreRelease-Longduration-Medium-54922-[P2] daemon: add check before updating kernelArgs [Disruptive]
|
['"fmt"', '"regexp"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonPreRelease-Longduration-Medium-54922-[P2] daemon: add check before updating kernelArgs [Disruptive]", func() {
var (
mcNameArg1 = "tc-54922-kernel-args-1"
mcNameArg2 = "tc-54922-kernel-args-2"
mcNameExt = "tc-54922-extension"
kernelArg1 = "test1"
kernelArg2 = "test2"
usbguardMCTemplate = "change-worker-extension-usbguard.yaml"
expectedLogArg1Regex = regexp.QuoteMeta("Running rpm-ostree [kargs") + ".*" + regexp.QuoteMeta(fmt.Sprintf("--append=%s", kernelArg1)) +
".*" + regexp.QuoteMeta("]")
expectedLogArg2Regex = regexp.QuoteMeta("Running rpm-ostree [kargs") + ".*" + regexp.QuoteMeta(fmt.Sprintf("--delete=%s", kernelArg1)) +
".*" + regexp.QuoteMeta(fmt.Sprintf("--append=%s", kernelArg1)) +
".*" + regexp.QuoteMeta(fmt.Sprintf("--append=%s", kernelArg2)) +
".*" + regexp.QuoteMeta("]")
// Expr: "kargs .*--append|kargs .*--delete"
// We need to escape the "--" characters
expectedNotLogExtensionRegex = "kargs .*" + regexp.QuoteMeta("--") + "append|kargs .*" + regexp.QuoteMeta("--") + "delete"
mcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
)
skipTestIfSupportedPlatformNotMatched(oc, AWSPlatform, GCPPlatform)
workerNode := skipTestIfOsIsNotCoreOs(oc)
mcp.SetWaitingTimeForExtensionsChange()
// Create MC to add kernel arg 'test1'
exutil.By(fmt.Sprintf("Create a MC to add a kernel arg: %s", kernelArg1))
mcArgs1 := NewMachineConfig(oc.AsAdmin(), mcNameArg1, MachineConfigPoolWorker)
mcArgs1.parameters = []string{fmt.Sprintf(`KERNEL_ARGS=["%s"]`, kernelArg1)}
mcArgs1.skipWaitForMcp = true
defer mcArgs1.delete()
mcArgs1.create()
logger.Infof("OK!\n")
exutil.By("Check that the MCD logs are tracing the new kernel argument")
// We don't know if the selected node will be updated first or last, so we have to wait
// approximately the same time we would wait for the MCP to be updated.
timeToWait := mcp.estimateWaitDuration()
logger.Infof("waiting time: %s", timeToWait.String())
o.Expect(workerNode.CaptureMCDaemonLogsUntilRestartWithTimeout(timeToWait.String())).To(
o.MatchRegexp(expectedLogArg1Regex),
"A log line reporting new kernel arguments should be present in the MCD logs when we add a kernel argument via MC")
logger.Infof("OK!\n")
exutil.By("Wait for worker pool to be updated")
mcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Check that the new kernel argument was added")
o.Expect(workerNode.IsKernelArgEnabled(kernelArg1)).To(o.BeTrue(),
"Kernel argument %s was not enabled in the node %s", kernelArg1, workerNode.GetName())
logger.Infof("OK!\n")
// Create MC to add kernel arg 'test2'
exutil.By(fmt.Sprintf("Create a MC to add a kernel arg: %s", kernelArg2))
mcArgs2 := NewMachineConfig(oc.AsAdmin(), mcNameArg2, MachineConfigPoolWorker)
mcArgs2.parameters = []string{fmt.Sprintf(`KERNEL_ARGS=["%s"]`, kernelArg2)}
mcArgs2.skipWaitForMcp = true
defer mcArgs2.deleteNoWait()
mcArgs2.create()
logger.Infof("OK!\n")
exutil.By("Check that the MCD logs are tracing both kernel arguments")
// We don't know if the selected node will be updated first or last, so we have to wait
// approximately the same time we would wait for the MCP to be updated.
logger.Infof("waiting time: %s", timeToWait.String())
o.Expect(workerNode.CaptureMCDaemonLogsUntilRestartWithTimeout(timeToWait.String())).To(
o.MatchRegexp(expectedLogArg2Regex),
"A log line reporting the new kernel arguments configuration should be present in MCD logs")
logger.Infof("OK!\n")
exutil.By("Wait for worker pool to be updated")
mcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Check that the both kernel arguments were added")
o.Expect(workerNode.IsKernelArgEnabled(kernelArg1)).To(o.BeTrue(),
"Kernel argument %s was not enabled in the node %s", kernelArg1, workerNode.GetName())
o.Expect(workerNode.IsKernelArgEnabled(kernelArg2)).To(o.BeTrue(),
"Kernel argument %s was not enabled in the node %s", kernelArg2, workerNode.GetName())
logger.Infof("OK!\n")
// Create MC to deploy an usbguard extension
exutil.By("Create MC to add usbguard extension")
mcUsb := NewMachineConfig(oc.AsAdmin(), mcNameExt, MachineConfigPoolWorker).SetMCOTemplate(usbguardMCTemplate)
mcUsb.skipWaitForMcp = true
defer mcUsb.deleteNoWait()
mcUsb.create()
logger.Infof("OK!\n")
exutil.By("Check that the MCD logs do not make any reference to add or delete kargs")
o.Expect(workerNode.CaptureMCDaemonLogsUntilRestartWithTimeout(timeToWait.String())).NotTo(
o.MatchRegexp(expectedNotLogExtensionRegex),
"MCD logs should not make any reference to kernel arguments addition/deletion when no new kernel arg is added/deleted")
logger.Infof("OK!\n")
exutil.By("Wait for worker pool to be updated")
mcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Check that the usbguard extension was added")
o.Expect(workerNode.RpmIsInstalled("usbguard")).To(
o.BeTrue(),
"usbguard rpm should be installed in node %s", workerNode.GetName())
logger.Infof("OK!\n")
})
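The MCD log expectations above are built by joining QuoteMeta-escaped tokens with ".*" wildcards, because rpm-ostree may interleave other arguments between them. A small sketch against an illustrative log line (the real format comes from the machine-config-daemon):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	kernelArg1 := "test1"
	// Each literal token is escaped, and ".*" allows any other arguments in between.
	expectedLogArg1Regex := regexp.QuoteMeta("Running rpm-ostree [kargs") + ".*" +
		regexp.QuoteMeta(fmt.Sprintf("--append=%s", kernelArg1)) + ".*" + regexp.QuoteMeta("]")

	// Illustrative MCD log line, not captured from a real cluster.
	sample := "Running rpm-ostree [kargs --append=test1]"
	fmt.Println(regexp.MustCompile(expectedLogArg1Regex).MatchString(sample)) // true
}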
| |||||
test case
|
openshift/openshift-tests-private
|
045bce4d-a657-41f4-bdbd-87187cfda332
|
Author:sregidor-NonPreRelease-Longduration-Medium-56123-[OnCLayer] Invalid extensions should degrade the machine config pool [Disruptive]
|
['"fmt"', '"regexp"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonPreRelease-Longduration-Medium-56123-[OnCLayer] Invalid extensions should degrade the machine config pool [Disruptive]", func() {
var (
validExtension = "usbguard"
invalidExtension = "zsh"
mcName = "mco-tc-56123-invalid-extension"
mcp = GetCompactCompatiblePool(oc)
expectedRDMessage = regexp.QuoteMeta(fmt.Sprintf("invalid extensions found: [%s]", invalidExtension)) // QuoteMeta to escape regex characters
expectedRDReason = ""
)
exutil.By("Create a MC with invalid extensions")
mc := NewMachineConfig(oc.AsAdmin(), mcName, mcp.GetName())
mc.parameters = []string{fmt.Sprintf(`EXTENSIONS=["%s", "%s"]`, validExtension, invalidExtension)}
mc.skipWaitForMcp = true
validateMcpRenderDegraded(mc, mcp, expectedRDMessage, expectedRDReason)
})
| |||||
test case
|
openshift/openshift-tests-private
|
492eb838-fe4c-4771-8bb3-764289b8163b
|
Author:rioliu-NonHyperShiftHOST-Medium-54974-[P1][OnCLayer] silence audit log events for container infra
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:rioliu-NonHyperShiftHOST-Medium-54974-[P1][OnCLayer] silence audit log events for container infra", func() {
auditRuleFile := "/etc/audit/rules.d/mco-audit-quiet-containers.rules"
auditLogFile := "/var/log/audit/audit.log"
allCoreOsNodes := NewNodeList(oc.AsAdmin()).GetAllCoreOsNodesOrFail()
for _, node := range allCoreOsNodes {
if node.HasTaintEffectOrFail("NoExecute") {
logger.Infof("Node %s is tainted with 'NoExecute'. Validation skipped.", node.GetName())
continue
}
exutil.By(fmt.Sprintf("log into node %s to check audit rule file exists or not", node.GetName()))
o.Expect(node.DebugNodeWithChroot("stat", auditRuleFile)).ShouldNot(
o.ContainSubstring("No such file or directory"),
"The audit rules file %s should exist in the nodes", auditRuleFile)
exutil.By("check expected msgtype in audit log rule file")
grepOut, _ := node.DebugNodeWithOptions([]string{"--quiet"}, "chroot", "/host", "bash", "-c", fmt.Sprintf("grep -E 'NETFILTER_CFG|ANOM_PROMISCUOUS' %s", auditRuleFile))
o.Expect(grepOut).NotTo(o.BeEmpty(), "expected excluded audit log msgtype not found")
o.Expect(grepOut).Should(o.And(
o.ContainSubstring("NETFILTER_CFG"),
o.ContainSubstring("ANOM_PROMISCUOUS"),
), "audit log rules does not have excluded msstype NETFILTER_CFG and ANOM_PROMISCUOUS")
exutil.By(fmt.Sprintf("check audit log on node %s, make sure msg types NETFILTER_CFG and ANOM_PROMISCUOUS are excluded", node.GetName()))
filteredLog, _ := node.DebugNodeWithChroot("bash", "-c", fmt.Sprintf("grep -E 'NETFILTER_CFG|ANOM_PROMISCUOUS' %s", auditLogFile))
o.Expect(filteredLog).ShouldNot(o.Or(
o.ContainSubstring("NETFILTER_CFG"),
o.ContainSubstring("ANOM_PROMISCUOUS"),
), "audit log contains excluded msgtype NETFILTER_CFG or ANOM_PROMISCUOUS")
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
b12068db-6a1a-4416-bbb5-c876e25491dc
|
Author:sregidor-DEPRECATED-NonPreRelease-Longduration-Medium-56706-Move MCD drain alert into the MCC, revisit error mode[Disruptive]
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-DEPRECATED-NonPreRelease-Longduration-Medium-56706-Move MCD drain alert into the MCC, revisit error mode[Disruptive]", func() {
var (
mcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
mcc = NewController(oc.AsAdmin())
nsName = oc.Namespace()
pdbName = "dont-evict-43279"
podName = "dont-evict-43279"
podTemplate = generateTemplateAbsolutePath("create-pod.yaml")
mcName = "test-file"
mcTemplate = "add-mc-to-trigger-node-drain.yaml"
expectedAlertName = "MCCDrainError"
)
// Get the first node that will be updated
// Not all nodes are valid. We need to deploy the "dont-evict-pod" and we can only do that on schedulable nodes.
// In "edge" clusters, the "edge" nodes are not schedulable, so we need to be careful not to use them to deploy our pod.
schedulableNodes := FilterSchedulableNodesOrFail(mcp.GetSortedNodesOrFail())
o.Expect(schedulableNodes).NotTo(o.BeEmpty(), "There are no schedulable worker nodes!!")
workerNode := schedulableNodes[0]
exutil.By("Start machine-config-controller logs capture")
ignoreMccLogErr := mcc.IgnoreLogsBeforeNow()
o.Expect(ignoreMccLogErr).NotTo(o.HaveOccurred(), "Ignore mcc log failed")
logger.Infof("OK!\n")
exutil.By("Create a pod disruption budget to set minAvailable to 1")
pdbTemplate := generateTemplateAbsolutePath("pod-disruption-budget.yaml")
pdb := PodDisruptionBudget{name: pdbName, namespace: nsName, template: pdbTemplate}
defer pdb.delete(oc)
pdb.create(oc)
logger.Infof("OK!\n")
exutil.By("Create new pod for pod disruption budget")
hostname, err := workerNode.GetNodeHostname()
o.Expect(err).NotTo(o.HaveOccurred())
pod := exutil.Pod{Name: podName, Namespace: nsName, Template: podTemplate, Parameters: []string{"HOSTNAME=" + hostname}}
defer func() { o.Expect(pod.Delete(oc)).NotTo(o.HaveOccurred()) }()
pod.Create(oc)
logger.Infof("OK!\n")
exutil.By("Create new mc to add new file on the node and trigger node drain")
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker).SetMCOTemplate(mcTemplate)
mc.skipWaitForMcp = true
defer mc.delete()
defer func() {
_ = pod.Delete(oc)
mcp.WaitForNotDegradedStatus()
}()
mc.create()
logger.Infof("OK!\n")
exutil.By("Wait until node is cordoned")
o.Eventually(workerNode.Poll(`{.spec.taints[?(@.effect=="NoSchedule")].effect}`),
"20m", "1m").Should(o.Equal("NoSchedule"), fmt.Sprintf("Node %s was not cordoned", workerNode.name))
logger.Infof("OK!\n")
exutil.By("Verify that node is not degraded until the alarm timeout")
o.Consistently(mcp.pollDegradedStatus(),
"58m", "5m").Should(o.Equal("False"),
"The worker MCP was degraded too soon. The worker MCP should not be degraded until 1 hour timeout happens")
logger.Infof("OK!\n")
exutil.By("Verify that node is degraded after the 1h timeout")
o.Eventually(mcp.pollDegradedStatus(),
"5m", "1m").Should(o.Equal("True"),
"1 hour passed since the eviction problems were reported and the worker MCP has not been degraded")
logger.Infof("OK!\n")
exutil.By("Verify that the error is properly reported in the controller pod's logs")
logger.Debugf("CONTROLLER LOGS BEGIN!\n")
logger.Debugf(mcc.GetFilteredLogs(workerNode.GetName()))
logger.Debugf("CONTROLLER LOGS END!\n")
o.Expect(mcc.GetFilteredLogs(workerNode.GetName())).Should(
o.ContainSubstring("node %s: drain exceeded timeout: 1h0m0s. Will continue to retry.",
workerNode.GetName()),
"The eviction problem is not properly reported in the MCController pod logs")
logger.Infof("OK!\n")
exutil.By("Verify that the error is properly reported in the MachineConfigPool status")
nodeDegradedCondition := mcp.GetConditionByType("NodeDegraded")
nodeDegradedConditionJSON := JSON(nodeDegradedCondition)
nodeDegradedMessage := nodeDegradedConditionJSON.Get("message").ToString()
expectedDegradedNodeMessage := fmt.Sprintf("failed to drain node: %s after 1 hour. Please see machine-config-controller logs for more information", workerNode.GetName())
logger.Infof("MCP NodeDegraded condition: %s", nodeDegradedCondition)
o.Expect(nodeDegradedMessage).To(o.ContainSubstring(expectedDegradedNodeMessage),
"The error reported in the MCP NodeDegraded condition in not the expected one")
logger.Infof("OK!\n")
exutil.By("Verify that the alert is triggered")
var alertJSON []JSONData
var alertErr error
o.Eventually(func() ([]JSONData, error) {
alertJSON, alertErr = getAlertsByName(oc, expectedAlertName)
return alertJSON, alertErr
}, "5m", "20s").Should(o.HaveLen(1),
"Expected 1 %s alert and only 1 to be triggered!", expectedAlertName)
logger.Infof("OK!\n")
exutil.By("Verify that the alert has the right message")
logger.Infof("Found %s alerts: %s", expectedAlertName, alertJSON)
expectedDescription := fmt.Sprintf("Drain failed on %s , updates may be blocked. For more details check MachineConfigController pod logs: oc logs -f -n openshift-machine-config-operator machine-config-controller-xxxxx -c machine-config-controller", workerNode.GetName())
o.Expect(alertJSON[0].Get("annotations").Get("description").ToString()).Should(o.ContainSubstring(expectedDescription),
"The error description should make a reference to the pod info")
expectedSummary := "Alerts the user to a failed node drain. Always triggers when the failure happens one or more times."
o.Expect(alertJSON[0].Get("annotations").Get("summary").ToString()).Should(o.Equal(expectedSummary),
"The alert has a wrong 'summary' annotation value")
// Since OCPBUGS-904 we need to check that the namespace is reported properly
o.Expect(alertJSON[0].Get("labels").Get("namespace").ToString()).Should(o.Equal(MachineConfigNamespace),
"The alert's namespace has not the right value")
logger.Infof("OK!\n")
exutil.By("Remove the pod disruption budget")
pdb.delete(oc)
logger.Infof("OK!\n")
exutil.By("Verfiy that the pool stops being degraded")
o.Eventually(mcp.pollDegradedStatus(),
"10m", "30s").Should(o.Equal("False"),
"After removing the PodDisruptionBudget the eviction should have succeeded and the worker pool should stop being degraded")
logger.Infof("OK!\n")
exutil.By("Verfiy that the alert is not triggered anymore")
o.Eventually(getAlertsByName, "5m", "20s").WithArguments(oc, expectedAlertName).
Should(o.HaveLen(0),
"Alert is not removed after the problem is fixed!")
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
90b34ea3-7df3-4045-90fc-14db801a4cdf
|
Author:sregidor-NonPreRelease-Longduration-Medium-56614-[P2][OnCLayer] Create unit with content and mask=true[Disruptive]
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonPreRelease-Longduration-Medium-56614-[P2][OnCLayer] Create unit with content and mask=true[Disruptive]", func() {
var (
workerNode = NewNodeList(oc).GetAllLinuxWorkerNodesOrFail()[0]
maskedString = "Loaded: masked"
inactiveString = "Active: inactive (dead)"
mcName = "tc-56614-maks-and-contents"
svcName = "tc-56614-maks-and-contents.service"
svcContents = "[Unit]\nDescription=Just random content for test case 56614"
maskSvcConfig = getMaskServiceWithContentsConfig(svcName, true, svcContents)
)
exutil.By("Create a MachineConfig resource to mask the chronyd service")
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker)
mc.parameters = []string{fmt.Sprintf("UNITS=[%s]", maskSvcConfig)}
defer mc.delete()
mc.create()
logger.Infof("OK!\n")
exutil.By("Wait until worker MachineConfigPool has finished the configuration")
mcp := NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
mcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Validate that the service is masked")
output, _ := workerNode.DebugNodeWithChroot("systemctl", "status", svcName)
// Since the service is masked, the "systemctl status" command will return a value != 0 and an error will be reported
// So we don't check the error, only the output
o.Expect(output).Should(o.And(
o.ContainSubstring(inactiveString),
o.ContainSubstring(maskedString),
),
"Service %s should be inactive and masked, but it is not.", svcName)
logger.Infof("OK!\n")
exutil.By("Validate the content")
rf := NewRemoteFile(workerNode, "/etc/systemd/system/"+svcName)
rferr := rf.Stat()
o.Expect(rferr).NotTo(o.HaveOccurred())
o.Expect(rf.GetSymLink()).Should(o.Equal(fmt.Sprintf("'/etc/systemd/system/%s' -> '/dev/null'", svcName)),
"The service is masked, so service's file should be a link to /dev/null")
logger.Infof("OK!\n")
})
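A masked unit with contents is expressed in Ignition as a systemd unit entry whose mask field is true. The sketch below shows an assumed, illustrative shape of such an entry; the struct is defined here only for the example and is not the helper used by the test:

package main

import (
	"encoding/json"
	"fmt"
)

// ignUnit is a simplified, assumed subset of the Ignition 3.x systemd unit schema.
type ignUnit struct {
	Name     string `json:"name"`
	Mask     bool   `json:"mask"`
	Contents string `json:"contents,omitempty"`
}

func main() {
	unit := ignUnit{
		Name:     "tc-56614-maks-and-contents.service",
		Mask:     true,
		Contents: "[Unit]\nDescription=Just random content for test case 56614",
	}
	raw, err := json.Marshal(unit)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(raw))
}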
| |||||
test case
|
openshift/openshift-tests-private
|
82fb4cd1-1440-4995-b942-2a5460f45db3
|
Author:sregidor-NonPreRelease-Longduration-Medium-57595-[OnCLayer] Use empty pull-secret[Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonPreRelease-Longduration-Medium-57595-[OnCLayer] Use empty pull-secret[Disruptive]", func() {
var (
pullSecret = GetPullSecret(oc.AsAdmin())
wMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
mMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)
)
// If the cluster is using extensions, empty pull-secret will break the pools because images' validation is mandatory
skipTestIfExtensionsAreUsed(oc.AsAdmin())
// If RT kernel is enabled, empty pull-secrets will break the pools because the image's validation is mandatory
skipTestIfRTKernel(oc.AsAdmin())
exutil.By("Capture the current pull-secret value")
// We don't use the pullSecret resource directly, instead we use auxiliary functions that will
// extract and restore the secret's values using a file. Like that we can recover the value of the pull-secret
// if our execution goes wrong, without printing it in the logs (for security reasons).
secretFile, err := getPullSecret(oc)
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the pull-secret")
logger.Debugf("Pull-secret content stored in file %s", secretFile)
defer func() {
logger.Infof("Start defer func")
logger.Infof("Restoring initial pull-secret value")
output, err := setDataForPullSecret(oc, secretFile)
if err != nil {
logger.Errorf("Error restoring the pull-secret's value. Error: %s\nOutput: %s", err, output)
}
wMcp.waitForComplete()
mMcp.waitForComplete()
logger.Infof("End defer func")
}()
logger.Infof("OK!\n")
exutil.By("Set an empty pull-secret")
o.Expect(pullSecret.SetDataValue(".dockerconfigjson", "{}")).To(o.Succeed(),
"Error setting an empty pull-secret value")
logger.Infof("OK!\n")
exutil.By("Wait for machine config poools to be udated")
logger.Infof("Wait for worker pool to be updated")
wMcp.waitForComplete()
logger.Infof("Wait for master pool to be updated")
mMcp.waitForComplete()
logger.Infof("OK!\n")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
6c092163-11d4-4915-b284-ff463a0943be
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-[P2] Longduration-Medium-72132-[OnCLayer] enable FIPS by Machine-Config-Operator not supported [Disruptive]
|
['"regexp"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-[P2] Longduration-Medium-72132-[OnCLayer] enable FIPS by Machine-Config-Operator not supported [Disruptive]", func() {
var (
mcTemplate = "change-fips.yaml"
mcName = "mco-tc-25819-master-fips"
wMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
mMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)
expectedRDMessage = regexp.QuoteMeta("detected change to FIPS flag; refusing to modify FIPS on a running cluster")
expectedRDReason = ""
)
// If FIPS is already enabled, we skip the test case
skipTestIfFIPSIsEnabled(oc.AsAdmin())
exutil.By("Try to enable FIPS in master pool")
mMc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolMaster).SetMCOTemplate(mcTemplate)
mMc.parameters = []string{"FIPS=true"}
mMc.skipWaitForMcp = true
validateMcpRenderDegraded(mMc, mMcp, expectedRDMessage, expectedRDReason)
logger.Infof("OK!\n")
exutil.By("Try to enable FIPS in worker pool")
wMc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker).SetMCOTemplate(mcTemplate)
wMc.parameters = []string{"FIPS=true"}
wMc.skipWaitForMcp = true
validateMcpRenderDegraded(wMc, wMcp, expectedRDMessage, expectedRDReason)
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
7ef51d38-91ea-4bbf-b238-6a16eda20c98
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Low-72135-[P1][OnCLayer] Refuse to disable FIPS mode by Machine-Config-Operator [Disruptive]
|
['"regexp"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Low-72135-[P1][OnCLayer] Refuse to disable FIPS mode by Machine-Config-Operator [Disruptive]", func() {
var (
mMcName = "99-master-fips"
wMcName = "99-worker-fips"
wMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
mMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)
mMc = NewMachineConfig(oc.AsAdmin(), mMcName, MachineConfigPoolMaster)
wMc = NewMachineConfig(oc.AsAdmin(), wMcName, MachineConfigPoolWorker)
expectedRDMessage = regexp.QuoteMeta("detected change to FIPS flag; refusing to modify FIPS on a running cluster")
expectedRDReason = ""
)
// If FIPS is already disabled, we skip the test case
skipTestIfFIPSIsNotEnabled(oc.AsAdmin())
defer func() {
logger.Infof("Starting defer logic")
mMc.Patch("merge", `{"spec":{"fips": true}}`)
wMc.Patch("merge", `{"spec":{"fips": true}}`)
wMcp.RecoverFromDegraded()
mMcp.RecoverFromDegraded()
}()
exutil.By("Patch the master-fips MC and set fips=false")
mMc.Patch("merge", `{"spec":{"fips": false}}`)
checkDegraded(mMcp, expectedRDMessage, expectedRDReason, "RenderDegraded", false, 1)
exutil.By("Try to disasble FIPS in worker pool")
wMc.Patch("merge", `{"spec":{"fips": false}}`)
checkDegraded(wMcp, expectedRDMessage, expectedRDReason, "RenderDegraded", false, 1)
})
| |||||
test case
|
openshift/openshift-tests-private
|
41cdb011-bb5d-4508-971a-812effa3409e
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-59837-[P2] Use wrong user when creating a file [Disruptive]
|
['"fmt"', '"regexp"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-59837-[P2] Use wrong user when creating a file [Disruptive]", func() {
var (
mcName = "mco-tc-59837-create-file-with-wrong-user"
wrongUserFileConfig = `{"contents": {"source": "data:text/plain;charset=utf-8;base64,dGVzdA=="},"mode": 420,"path": "/etc/wronguser-test-file.test","user": {"name": "wronguser"}}`
mcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
// QuoteMeta to escape regex characters in the file path
expectedNDMessage = regexp.QuoteMeta(`failed to retrieve file ownership for file \"/etc/wronguser-test-file.test\": failed to retrieve UserID for username: wronguser`)
expectedNDReason = "1 nodes are reporting degraded status on sync"
)
exutil.By("Create the force file using a MC")
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker)
mc.parameters = []string{fmt.Sprintf("FILES=[%s]", wrongUserFileConfig)}
mc.skipWaitForMcp = true
validateMcpNodeDegraded(mc, mcp, expectedNDMessage, expectedNDReason, true)
})
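The inline file config above embeds the file contents as a base64 data URL; "dGVzdA==" is simply base64 for "test". A minimal sketch of the decoding and of building the same kind of URL (the real tests presumably do this through GetBase64EncodedFileSourceContent):

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// Decode the literal used in the file config above.
	decoded, err := base64.StdEncoding.DecodeString("dGVzdA==")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(decoded)) // test

	// Build the equivalent Ignition data URL for the same contents.
	encoded := base64.StdEncoding.EncodeToString([]byte("test"))
	fmt.Println("data:text/plain;charset=utf-8;base64," + encoded)
}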
| |||||
test case
|
openshift/openshift-tests-private
|
52370e97-b7ef-4582-9702-a328b6904c38
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-59867-Create files specifying user and group [Disruptive]
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-59867-Create files specifying user and group [Disruptive]", func() {
var (
filesContent = "test"
coreUserID = 1000
coreGroupID = 1000
rootUserID = 0
admGroupID = 4
allFiles = []ign32File{
{
Path: "/etc/core-core-name-test-file.test",
Contents: ign32Contents{
Source: GetBase64EncodedFileSourceContent(filesContent),
},
Mode: PtrInt(420), // decimal 0644
User: &ign32FileUser{
Name: "core",
},
Group: &ign32FileGroup{
Name: "core",
},
},
{
Path: "/etc/core-core-id-test-file.test",
Contents: ign32Contents{
Source: GetBase64EncodedFileSourceContent(filesContent),
},
Mode: PtrInt(416), // decimal 0640
User: &ign32FileUser{
ID: PtrInt(coreUserID), // core user ID number
},
Group: &ign32FileGroup{
ID: PtrInt(coreGroupID), // core group ID number
},
},
{
Path: "/etc/root-adm-id-test-file.test",
Contents: ign32Contents{
Source: GetBase64EncodedFileSourceContent(filesContent),
},
Mode: PtrInt(384), // decimal 0600
User: &ign32FileUser{
ID: PtrInt(rootUserID),
},
Group: &ign32FileGroup{
ID: PtrInt(admGroupID),
},
},
{
Path: "/etc/nouser-test-file.test",
Contents: ign32Contents{
Source: GetBase64EncodedFileSourceContent(filesContent),
},
Mode: PtrInt(420), // decimal 0644
User: &ign32FileUser{
ID: PtrInt(12343), // this user does not exist
},
Group: &ign32FileGroup{
ID: PtrInt(34321), // this group does not exist
},
},
}
wMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
workerNode = wMcp.GetNodesOrFail()[0] // we don't want to get "windows" nodes
)
// Maybe in the future we can add some logic to create the "core" user and the "core" group with the right IDs.
// For now we skip the test case to avoid breaking executions with RHEL nodes.
if len(NewNodeList(oc).GetAllRhelWokerNodesOrFail()) != 0 {
g.Skip("There are yum based RHEL nodes in the cluster. This test cannot be executed because no 'core' user/group exist in RHEL nodes")
}
exutil.By("Create new machine config to create files with different users and groups")
fileConfig := MarshalOrFail(allFiles)
mcName := "tc-59867-create-files-with-users"
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker)
mc.parameters = []string{fmt.Sprintf("FILES=%s", fileConfig)}
defer mc.delete()
mc.create()
wMcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Check that all files have been created with the right user, group, permissions and data")
for _, file := range allFiles {
logger.Infof("")
logger.Infof("CHecking file: %s", file.Path)
rf := NewRemoteFile(workerNode, file.Path)
o.Expect(rf.Fetch()).NotTo(o.HaveOccurred(), "Error getting the file %s in node %s", file.Path, workerNode.GetName())
logger.Infof("Checking content: %s", rf.GetTextContent())
o.Expect(rf.GetTextContent()).To(o.Equal(filesContent),
"The content of file %s is wrong!", file.Path)
// Verify that the defined user name or user id (only one can be defined in the config) is the expected one
if file.User.Name != "" {
logger.Infof("Checking user name: %s", rf.GetUIDName())
o.Expect(rf.GetUIDName()).To(o.Equal(file.User.Name),
"The user who owns file %s is wrong!", file.Path)
} else {
logger.Infof("Checking user id: %s", rf.GetUIDNumber())
o.Expect(rf.GetUIDNumber()).To(o.Equal(fmt.Sprintf("%d", *file.User.ID)),
"The user id what owns file %s is wrong!", file.Path)
}
// Verify that if the user ID is defined and its value is the core user's one. Then the name should be "core"
if file.User.ID != nil && *file.User.ID == coreUserID {
logger.Infof("Checking core user name for ID: %s", rf.GetUIDNumber())
o.Expect(rf.GetUIDName()).To(o.Equal("core"),
"The user name who owns file %s is wrong! User name for Id %s should be 'core'",
file.Path, rf.GetUIDNumber())
}
// Verify that if the user ID is defined and its value is the root user's one. Then the name should be "root"
if file.User.ID != nil && *file.User.ID == rootUserID {
logger.Infof("Checking root user name: %s", rf.GetUIDName())
o.Expect(rf.GetUIDName()).To(o.Equal("root"),
"The user name who owns file %s is wrong! User name for Id %s should be 'root'",
file.Path, rf.GetUIDNumber())
}
// Verify that the defined group name or group id (only one can be defined in the config) is the expected one
if file.Group.Name != "" {
logger.Infof("Checking group name: %s", rf.GetGIDName())
o.Expect(rf.GetGIDName()).To(o.Equal(file.Group.Name),
"The group that owns file %s is wrong!", file.Path)
} else {
logger.Infof("Checking group id: %s", rf.GetGIDNumber())
o.Expect(rf.GetGIDNumber()).To(o.Equal(fmt.Sprintf("%d", *file.Group.ID)),
"The group id what owns file %s is wrong!", file.Path)
}
// Verify that if the group ID is defined and its value is the core group's one. Then the name should be "core"
if file.Group.ID != nil && *file.Group.ID == coreGroupID {
logger.Infof("Checking core group name for ID: %s", rf.GetUIDNumber())
o.Expect(rf.GetGIDName()).To(o.Equal("core"),
"The group name who owns file %s is wrong! Group name for Id %s should be 'core'",
file.Path, rf.GetGIDNumber())
}
// Verify that if the group ID is defined and its value is the adm group's one. Then the name should be "adm"
if file.Group.ID != nil && *file.Group.ID == admGroupID {
logger.Infof("Checking adm group name: %s", rf.GetUIDNumber())
o.Expect(rf.GetGIDName()).To(o.Equal("adm"),
"The group name who owns file %s is wrong! Group name for Id %s should be 'adm'",
file.Path, rf.GetGIDNumber())
}
logger.Infof("Checking file permissions: %s", rf.GetNpermissions())
decimalPerm := ConvertOctalPermissionsToDecimalOrFail(rf.GetNpermissions())
o.Expect(decimalPerm).To(o.Equal(*file.Mode),
"The permssions of file %s are wrong", file.Path)
logger.Infof("OK!\n")
}
})
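Ignition file modes are plain decimal integers, which is why the test annotates values such as 420 with their octal equivalents (0644). A small sketch of the conversion in both directions:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Parse an octal permission string into the decimal value Ignition expects.
	decimalMode, err := strconv.ParseInt("0644", 8, 64)
	if err != nil {
		panic(err)
	}
	fmt.Println(decimalMode) // 420

	// And back: print a decimal Ignition mode as octal permissions.
	fmt.Printf("%o\n", 416) // 640, i.e. mode 0640
}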
| |||||
test case
|
openshift/openshift-tests-private
|
6a622931-cd69-46ff-9309-5a22bb1977c6
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-61555-[P1] ImageDigestMirrorSet test [Disruptive]
|
['"regexp"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-61555-[P1] ImageDigestMirrorSet test [Disruptive]", func() {
var (
idmsName = "tc-61555-digest-mirror"
mcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)
node = mcp.GetNodesOrFail()[0]
)
// ImageDigestMirrorSet is not compatible with ImageContentSourcePolicy.
// If any ImageContentSourcePolicy exists we skip this test case.
skipTestIfImageContentSourcePolicyExists(oc.AsAdmin())
exutil.By("Start capturing events and clean pods logs")
startTime, dErr := node.GetDate()
o.Expect(dErr).ShouldNot(o.HaveOccurred(), "Error getting date in node %s", node.GetName())
o.Expect(node.IgnoreEventsBeforeNow()).NotTo(o.HaveOccurred(),
"Error getting the latest event in node %s", node.GetName())
logger.Infof("Removing all MCD pods to clean the logs.")
o.Expect(RemoveAllMCDPods(oc)).To(o.Succeed(), "Error removing all MCD pods in %s namespace", MachineConfigNamespace)
logger.Infof("OK!\n")
exutil.By("Create new machine config to deploy a ImageDigestMirrorSet configuring a mirror registry")
idms := NewImageDigestMirrorSet(oc.AsAdmin(), idmsName, *NewMCOTemplate(oc, "add-image-digest-mirror-set.yaml"))
defer mcp.waitForComplete()
defer idms.Delete()
idms.Create("-p", "NAME="+idmsName)
mcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Check logs to verify that no drain operation happened.")
log, err := exutil.GetSpecificPodLogs(oc, MachineConfigNamespace, MachineConfigDaemon, node.GetMachineConfigDaemon(), "")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(log).Should(o.ContainSubstring("Changes do not require drain, skipping"))
logger.Infof("OK!\n")
exutil.By("Check logs to verify that crio service was reloaded.")
o.Expect(log).Should(o.MatchRegexp(MCDCrioReloadedRegexp))
logger.Infof("OK!\n")
exutil.By("Verify that the node was NOT rebooted")
o.Expect(node.GetUptime()).Should(o.BeTemporally("<", startTime),
"The node %s must NOT be rebooted after applying the configuration, but it was rebooted. Uptime date happened after the start config time.", node.GetName())
logger.Infof("OK!\n")
exutil.By("Check that no drain nor reboot events were triggered")
nodeEvents, eErr := node.GetEvents()
o.Expect(eErr).ShouldNot(o.HaveOccurred(), "Error getting drain events for node %s", node.GetName())
o.Expect(nodeEvents).NotTo(HaveEventsSequence("Drain"), "Error, a Drain event was triggered but it shouldn't")
o.Expect(nodeEvents).NotTo(HaveEventsSequence("Reboot"), "Error, a Reboot event was triggered but it shouldn't")
logger.Infof("OK!\n")
exutil.By("Check that the /etc/containers/registries.conf file was configured")
rf := NewRemoteFile(node, "/etc/containers/registries.conf")
o.Expect(rf.Fetch()).To(o.Succeed(),
"Error getting file /etc/containers/registries.conf")
configRegex := `(?s)` + regexp.QuoteMeta(`[[registry]]`) + ".*" +
regexp.QuoteMeta(`registry.access.redhat.com/ubi8/ubi-minimal`) + ".*" +
regexp.QuoteMeta(`[[registry.mirror]]`) + ".*" +
regexp.QuoteMeta(`example.io/digest-example/ubi-minimal`) + ".*" +
`pull-from-mirror *= *"digest-only"`
o.Expect(rf.GetTextContent()).To(o.MatchRegexp(configRegex),
"The file /etc/containers/registries.conf has not been properly configured with the new mirror information")
logger.Infof("OK!\n")
exutil.By("Delete the ImageDigestMirrorSet resource")
removeTime := node.GetDateOrFail()
idms.Delete()
mcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Check that the configuration in file /etc/containers/registries.conf was restored")
o.Expect(rf.Fetch()).To(o.Succeed(),
"Error getting file /etc/containers/registries.conf")
o.Expect(rf.GetTextContent()).NotTo(o.ContainSubstring(`example.io/digest-example/ubi-minimal`),
"The configuration in file /etc/containers/registries.conf was not restored after deleting the ImageDigestMirrorSet resource")
logger.Infof("OK!\n")
checkMirrorRemovalDefaultEvents(node, removeTime)
})
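The registries.conf verification uses a single regex with the (?s) flag so that ".*" spans newlines across the TOML sections. A sketch matching the same regex against an illustrative file fragment (the real file is generated on the node by the container runtime config controller):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	configRegex := `(?s)` + regexp.QuoteMeta(`[[registry]]`) + ".*" +
		regexp.QuoteMeta(`registry.access.redhat.com/ubi8/ubi-minimal`) + ".*" +
		regexp.QuoteMeta(`[[registry.mirror]]`) + ".*" +
		regexp.QuoteMeta(`example.io/digest-example/ubi-minimal`) + ".*" +
		`pull-from-mirror *= *"digest-only"`

	// Illustrative /etc/containers/registries.conf fragment, not copied from a real node.
	sample := `[[registry]]
  location = "registry.access.redhat.com/ubi8/ubi-minimal"

  [[registry.mirror]]
    location = "example.io/digest-example/ubi-minimal"
    pull-from-mirror = "digest-only"`

	fmt.Println(regexp.MustCompile(configRegex).MatchString(sample)) // true
}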
| |||||
test case
|
openshift/openshift-tests-private
|
9e47f917-35f8-4924-b0ee-bda07c48f74c
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-61558-[P2] ImageTagMirrorSet test [Disruptive]
|
['"regexp"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-61558-[P2] ImageTagMirrorSet test [Disruptive]", func() {
var (
itmsName = "tc-61558-tag-mirror"
mcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)
node = mcp.GetNodesOrFail()[0]
)
// ImageTagMirrorSet is not compatible with ImageContentSourcePolicy.
// If any ImageContentSourcePolicy exists we skip this test case.
skipTestIfImageContentSourcePolicyExists(oc.AsAdmin())
exutil.By("Start capturing events and clean pods logs")
startTime, dErr := node.GetDate()
o.Expect(dErr).ShouldNot(o.HaveOccurred(), "Error getting date in node %s", node.GetName())
o.Expect(node.IgnoreEventsBeforeNow()).NotTo(o.HaveOccurred(),
"Error getting the latest event in node %s", node.GetName())
logger.Infof("Removing all MCD pods to clean the logs.")
o.Expect(RemoveAllMCDPods(oc)).To(o.Succeed(), "Error removing all MCD pods in %s namespace", MachineConfigNamespace)
logger.Infof("OK!\n")
exutil.By("Create new machine config to deploy a ImageTagMirrorSet configuring a mirror registry")
itms := NewImageTagMirrorSet(oc.AsAdmin(), itmsName, *NewMCOTemplate(oc, "add-image-tag-mirror-set.yaml"))
defer mcp.waitForComplete()
defer itms.Delete()
itms.Create("-p", "NAME="+itmsName)
mcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Check logs to verify that a drain operation was executed.")
log, err := exutil.GetSpecificPodLogs(oc, MachineConfigNamespace, MachineConfigDaemon, node.GetMachineConfigDaemon(), "")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(log).Should(o.ContainSubstring("requesting cordon and drain"))
logger.Infof("OK!\n")
exutil.By("Check logs to verify that crio service was reloaded.")
o.Expect(log).Should(o.MatchRegexp(MCDCrioReloadedRegexp))
logger.Infof("OK!\n")
exutil.By("Verify that the node was NOT rebooted")
o.Expect(node.GetUptime()).Should(o.BeTemporally("<", startTime),
"The node %s must NOT be rebooted after applying the configuration, but it was rebooted. Uptime date happened after the start config time.", node.GetName())
logger.Infof("OK!\n")
exutil.By("Check no reboot events were triggered")
nodeEvents, eErr := node.GetEvents()
o.Expect(eErr).ShouldNot(o.HaveOccurred(), "Error getting drain events for node %s", node.GetName())
o.Expect(nodeEvents).NotTo(HaveEventsSequence("Reboot"), "Error, a Reboot event was triggered but it shouldn't")
logger.Infof("OK!\n")
exutil.By("Check that drain events were triggered")
o.Expect(nodeEvents).To(HaveEventsSequence("Drain"), "Error, a Drain event was triggered but it shouldn't")
logger.Infof("OK!\n")
exutil.By("Check that the /etc/containers/registries.conf file was configured")
rf := NewRemoteFile(node, "/etc/containers/registries.conf")
o.Expect(rf.Fetch()).To(o.Succeed(),
"Error getting file /etc/containers/registries.conf")
configRegex := `(?s)` + regexp.QuoteMeta(`[[registry]]`) + ".*" +
regexp.QuoteMeta(`registry.redhat.io/openshift4`) + ".*" +
regexp.QuoteMeta(`[[registry.mirror]]`) + ".*" +
regexp.QuoteMeta(`mirror.example.com/redhat`) + ".*" +
`pull-from-mirror *= *"tag-only"`
o.Expect(rf.GetTextContent()).To(o.MatchRegexp(configRegex),
"The file /etc/containers/registries.conf has not been properly configured with the new mirror information")
logger.Infof("OK!\n")
exutil.By("Delete the ImageTagMirrorSet resource")
removeTime := node.GetDateOrFail()
itms.Delete()
mcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Check that the configuration in file /etc/containers/registries.conf was restored")
o.Expect(rf.Fetch()).To(o.Succeed(),
"Error getting file /etc/containers/registries.conf")
o.Expect(rf.GetTextContent()).NotTo(o.ContainSubstring(`mirror.example.com/redhat`),
"The configuration in file /etc/containers/registries.conf was not restored after deleting the ImageTagMirrorSet resource")
logger.Infof("OK!\n")
checkMirrorRemovalDefaultEvents(node, removeTime)
})
| |||||
test case
|
openshift/openshift-tests-private
|
36b7d54c-f984-42de-b036-aa45ffbcc643
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Critical-62084-[OnCLayer] Certificate rotation in paused pools[Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Critical-62084-[OnCLayer] Certificate rotation in paused pools[Disruptive]", func() {
var (
wMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
mMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)
certSecret = NewSecret(oc.AsAdmin(), "openshift-kube-apiserver-operator", "kube-apiserver-to-kubelet-signer")
)
exutil.By("Pause MachineConfigPools")
defer mMcp.waitForComplete()
defer wMcp.waitForComplete()
defer wMcp.pause(false)
wMcp.pause(true)
defer mMcp.pause(false)
mMcp.pause(true)
logger.Infof("OK!\n")
exutil.By("Get current kube-apiserver certificate")
initialCert := certSecret.GetDataValueOrFail("tls.crt")
logger.Infof("Current certificate length: %d", len(initialCert))
logger.Infof("OK!\n")
exutil.By("Rotate certificate")
o.Expect(
certSecret.Patch("merge", `{"metadata": {"annotations": {"auth.openshift.io/certificate-not-after": null}}}`),
).To(o.Succeed(),
"The secret could not be patched in order to rotate the certificate")
logger.Infof("OK!\n")
exutil.By("Get current kube-apiserver certificate")
logger.Infof("Wait for certificate rotation")
o.Eventually(certSecret.GetDataValueOrFail).WithArguments("tls.crt").
ShouldNot(o.Equal(initialCert),
"The certificate was not rotated")
newCert := certSecret.GetDataValueOrFail("tls.crt")
logger.Infof("New certificate length: %d", len(newCert))
logger.Infof("OK!\n")
o.Expect(initialCert).NotTo(o.Equal(newCert),
"The certificate was not rotated")
logger.Infof("OK!\n")
// We verify all nodes in the pools (be aware that windows nodes do not belong to any pool, we are skipping them)
for _, node := range append(wMcp.GetNodesOrFail(), mMcp.GetNodesOrFail()...) {
logger.Infof("Checking certificate in node: %s", node.GetName())
rfCert := NewRemoteFile(node, "/etc/kubernetes/kubelet-ca.crt")
// Eventually the certificate file in all nodes should contain the new rotated certificate
o.Eventually(func(gm o.Gomega) string { // Passing o.Gomega as parameter we can use assertions inside the Eventually function without breaking the retries.
gm.Expect(rfCert.Fetch()).To(o.Succeed(),
"Cannot read the certificate file in node:%s ", node.GetName())
return rfCert.GetTextContent()
}, "5m", "10s").
Should(o.ContainSubstring(newCert),
"The certificate file %s in node %s does not contain the new rotated certificate.", rfCert.GetFullPath(), node.GetName())
logger.Infof("OK!\n")
}
exutil.By("Unpause MachineConfigPools")
logger.Infof("Check that once we unpause the pools the pending config can be applied without problems")
wMcp.pause(false)
mMcp.pause(false)
wMcp.waitForComplete()
mMcp.waitForComplete()
logger.Infof("OK!\n")
})
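A minimal sketch of the containment check the Eventually loop above performs, assuming direct filesystem access instead of the project's RemoteFile helper; the path and certificate value are placeholders.

package main

import (
	"fmt"
	"os"
	"strings"
)

// kubeletCAContains reports whether the on-disk kubelet CA bundle already
// includes the given PEM-encoded certificate (what the rotation check waits for).
func kubeletCAContains(bundlePath, cert string) (bool, error) {
	data, err := os.ReadFile(bundlePath)
	if err != nil {
		return false, err
	}
	return strings.Contains(string(data), strings.TrimSpace(cert)), nil
}

func main() {
	// Path and certificate are illustrative; in the test the file is read remotely on each node.
	ok, err := kubeletCAContains("/etc/kubernetes/kubelet-ca.crt", "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----")
	fmt.Println(ok, err)
}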
| ||||||
test case
|
openshift/openshift-tests-private
|
ba154d80-1868-4c7a-a7d1-cb43fd437023
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-63477-[P2] Deploy files using all available ignition configs. Default 3.4.0[Disruptive]
|
['"fmt"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-63477-[P2] Deploy files using all available ignition configs. Default 3.4.0[Disruptive]", func() {
var (
wMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
mcNames = "mc-tc-63477"
allVersions = []string{"2.2.0", "3.0.0", "3.1.0", "3.2.0", "3.3.0", "3.4.0"}
defaultIgnitionVersion = "3.4.0" // default version is 3.4.0 for OCP > 4.13
)
defer wMcp.waitForComplete()
exutil.By("Create MCs with all available ignition versions")
for _, version := range allVersions {
vID := strings.ReplaceAll(version, ".", "-")
fileConfig := ""
filePath := "/etc/" + vID + ".test"
mcName := mcNames + "-" + vID
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker)
mc.skipWaitForMcp = true
defer mc.deleteNoWait()
logger.Infof("Create MC %s", mc.name)
// 2.2.0 ignition config defines a different config for files
if version == "2.2.0" {
logger.Infof("Generating 2.2.0 file config!")
file := ign22File{
Path: filePath,
Contents: ign22Contents{
Source: GetBase64EncodedFileSourceContent(version + " test file"),
},
Mode: PtrInt(420), // decimal 0644
Filesystem: "root",
}
fileConfig = string(MarshalOrFail(file))
} else {
logger.Debugf("Generating 3.x file config!")
file := ign32File{
Path: filePath,
Contents: ign32Contents{
Source: GetBase64EncodedFileSourceContent(version + " test file"),
},
Mode: PtrInt(420), // decimal 0644
}
fileConfig = string(MarshalOrFail(file))
}
mc.parameters = []string{fmt.Sprintf("FILES=[%s]", fileConfig), "IGNITION_VERSION=" + version}
mc.create()
}
logger.Infof("OK!\n")
exutil.By("Wait for MCP to be updated")
wMcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Verify default rendered ignition version")
renderedMC, err := wMcp.GetConfiguredMachineConfig()
o.Expect(err).NotTo(o.HaveOccurred(), "Cannot get the rendered config for pool %s", wMcp.GetName())
o.Expect(renderedMC.GetIgnitionVersion()).To(o.Equal(defaultIgnitionVersion),
"Rendered MC should use %s default ignition version", defaultIgnitionVersion)
logger.Infof("OK!\n")
exutil.By("Verify that all files were created")
node := wMcp.GetNodesOrFail()[0]
for _, version := range allVersions {
vID := strings.ReplaceAll(version, ".", "-")
filePath := "/etc/" + vID + ".test"
logger.Infof("Checking file %s", filePath)
rf := NewRemoteFile(node, filePath)
o.Expect(rf.Fetch()).NotTo(o.HaveOccurred(),
"Cannot get information about file %s in node %s", filePath, node.GetName())
o.Expect(rf.GetTextContent()).To(o.Equal(version+" test file"),
"File %s in node %s does not have the right content", filePath, node.GetName())
}
logger.Infof("OK!\n")
})
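The ign22File/ign32File types are project-internal; the following standalone sketch only illustrates the shape of an Ignition 3.x storage.files entry with a base64 data URL source, using the standard library's encoding/json and encoding/base64.

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// ignFile mirrors the shape of a storage.files entry in an Ignition 3.x config
// (field names follow the spec, but this struct is a local illustration, not the project's type).
type ignFile struct {
	Path     string `json:"path"`
	Mode     int    `json:"mode"`
	Contents struct {
		Source string `json:"source"`
	} `json:"contents"`
}

func main() {
	content := "3.4.0 test file"
	f := ignFile{Path: "/etc/3-4-0.test", Mode: 420} // 420 decimal == 0644 octal
	f.Contents.Source = "data:text/plain;charset=utf-8;base64," + base64.StdEncoding.EncodeToString([]byte(content))
	out, _ := json.Marshal(f)
	fmt.Println(string(out)) // JSON of this shape is what gets templated into FILES=[...]
}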
| |||||
test case
|
openshift/openshift-tests-private
|
d897634a-18f9-4c6d-bb62-db2db82e0953
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-High-63868-[OnCLayer] ControllerConfig sync after Infrastructure objects are updated[Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-High-63868-[OnCLayer] ControllerConfig sync after Infrastructure objects are updated[Disruptive]", func() {
var (
label = "break.the.mco"
labelValue = "yes-tc-63868"
infra = NewResource(oc.AsAdmin(), "Infrastructure", "cluster")
mcCO = NewResource(oc.AsAdmin(), "ClusterOperator", "machine-config")
)
exutil.By("Label a Infrastructure resource")
defer func() {
// In case of error, the machine-config ClusterOperator will become degraded,
// so we need to recover the machine-config CO from degraded state.
// It is done by removing the machine-config-operator pod.
_ = infra.RemoveLabel(label)
oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "-n", MachineConfigNamespace,
"-l", "k8s-app=machine-config-operator", "--ignore-not-found=true").Execute()
o.Eventually(mcCO, "5m", "30s").ShouldNot(BeDegraded(), "Could not recover the machine-config CO from degraded status")
}()
o.Expect(
infra.AddLabel(label, labelValue),
).To(
o.Succeed(),
"%s/%s could not be labeled", infra.GetKind(), infra.GetName())
logger.Infof("OK!\n")
exutil.By("Check that machine-config ClusterOperator is not degraded")
o.Consistently(mcCO,
"5m", "30s").ShouldNot(BeDegraded(),
"machine-config ClusterOperator is degraded.\n%s", mcCO.PrettyString())
logger.Infof("OK!\n")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
cc801acd-de18-4b79-8dee-4b02a0451e2c
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Low-72136-[P1][OnCLayer] Reject MCs with ignition containing kernelArguments [Disruptive]
|
['"regexp"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Low-72136-[P1][OnCLayer] Reject MCs with ignition containing kernelArguments [Disruptive]", func() {
var (
mcName = "mco-tc-66376-reject-ignition-kernel-arguments"
mcp = GetCompactCompatiblePool(oc.AsAdmin())
// quotemeta to escape regex characters in the file path
expectedRDMessage = regexp.QuoteMeta(`ignition kargs section contains changes`)
expectedRDReason = ""
)
exutil.By("Create a MC with an ignition section that declares kernel arguments")
mc := NewMachineConfig(oc.AsAdmin(), mcName, mcp.GetName()).SetMCOTemplate("add-ignition-kernel-arguments.yaml")
mc.skipWaitForMcp = true
validateMcpRenderDegraded(mc, mcp, expectedRDMessage, expectedRDReason)
})
| |||||
test case
|
openshift/openshift-tests-private
|
8b35e54f-124a-4803-be0c-a59ecdcc4e34
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Medium-66436-[P2][OnCLayer] disable weak SSH cipher suites [Serial]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Medium-66436-[P2][OnCLayer] disable weak SSH cipher suites [Serial]", func() {
var (
// the list of weak cipher suites can be found here: https://issues.redhat.com/browse/OCPBUGS-15202
weakSuites = []string{"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
"TLS_RSA_WITH_AES_128_CBC_SHA",
"TLS_RSA_WITH_AES_256_CBC_SHA",
"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256"}
)
exutil.By("Verify that the controller pod is not using weakSuites")
ccRbacProxyArgs, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", MachineConfigNamespace, "-l", ControllerLabel+"="+ControllerLabelValue,
"-o", `jsonpath={.items[0].spec.containers[?(@.name=="kube-rbac-proxy")].args}`).Output()
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the arguments used in kube-rbac-proxy container in the controller pod")
o.Expect(ccRbacProxyArgs).To(o.ContainSubstring("--tls-cipher-suites"),
"Controller's kube-rbac-proxy container is not declaring the list of allowed cipher suites")
for _, weakSuite := range weakSuites {
logger.Infof("Verifying that %s is not used", weakSuite)
o.Expect(ccRbacProxyArgs).NotTo(o.ContainSubstring(weakSuite),
"Controller's kube-rbac-proxy container is using the weak cipher suite %s, and it should not", weakSuite)
logger.Infof("Suite ok")
}
logger.Infof("OK!\n")
exutil.By("Connect to the rbac-proxy service to verify the cipher")
mMcp := NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)
masterNode := mMcp.GetNodesOrFail()[0]
cipherOutput, cipherErr := masterNode.DebugNodeWithOptions([]string{"--image=" + TestSSLImage, "-n", MachineConfigNamespace}, "testssl.sh", "--color", "0", "localhost:9001")
logger.Infof("test ssh script output:\n %s", cipherOutput)
o.Expect(cipherErr).NotTo(o.HaveOccurred())
o.Expect(cipherOutput).Should(o.MatchRegexp(`Obsoleted CBC ciphers \(AES, ARIA etc.\) +not offered`))
for _, weakSuite := range weakSuites {
logger.Infof("Verifying that %s is not used", weakSuite)
o.Expect(cipherOutput).NotTo(o.ContainSubstring(weakSuite),
"The rbac-proxy service cipher test is reporting weak cipher suite: %s", weakSuite)
logger.Infof("Suite ok")
}
logger.Infof("OK!\n")
})
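As an illustration of the same idea, a hedged sketch that probes a TLS endpoint while restricted to a single weak CBC suite and expects the handshake to fail; the address mirrors the localhost:9001 port used above, and note that crypto/tls only honours the CipherSuites list for TLS 1.2 and below.

package main

import (
	"crypto/tls"
	"fmt"
)

// offersWeakSuite returns nil only if the server completes a TLS 1.2 handshake
// when the client is restricted to the given (weak) cipher suite.
func offersWeakSuite(addr string, suite uint16) error {
	conn, err := tls.Dial("tcp", addr, &tls.Config{
		InsecureSkipVerify: true,             // probe only, the certificate chain is not relevant here
		MinVersion:         tls.VersionTLS12, // CipherSuites is ignored for TLS 1.3
		MaxVersion:         tls.VersionTLS12,
		CipherSuites:       []uint16{suite},
	})
	if err != nil {
		return err
	}
	conn.Close()
	return nil
}

func main() {
	// localhost:9001 is the kube-rbac-proxy port checked above; adjust as needed.
	err := offersWeakSuite("localhost:9001", tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA)
	fmt.Println("handshake error (expected when weak suites are disabled):", err)
}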
| ||||||
test case
|
openshift/openshift-tests-private
|
d6d21004-2773-4f2a-b3bf-439226ab464f
|
Author:sregidor-NonHyperShiftHOST-Low-65208-[OnCLayer] Check the visibility of certificates
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonHyperShiftHOST-Low-65208-[OnCLayer] Check the visibility of certificates", func() {
var (
cc = NewControllerConfig(oc.AsAdmin(), "machine-config-controller")
mMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)
)
exutil.By("Check that the ControllerConfig resource is storing the right kube-apiserver-client-ca information")
kubeAPIServerClientCACM := NewNamespacedResource(oc.AsAdmin(), "ConfigMap", "openshift-config-managed", "kube-apiserver-client-ca")
kubeAPIServerClientCA := kubeAPIServerClientCACM.GetOrFail(`{.data.ca-bundle\.crt}`)
ccKubeAPIServerClientCA, err := cc.GetKubeAPIServerServingCAData()
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the kubeAPIServerServingCAData information from the ControllerConfig")
// We write the Expect command so that the certificates are not printed in case of failure
o.Expect(strings.Trim(ccKubeAPIServerClientCA, "\n") == strings.Trim(kubeAPIServerClientCA, "\n")).To(o.BeTrue(),
"The value of kubeAPIServerServingCAData in the ControllerConfig does not equal the value of configmap -n openshift-config-managed kube-apiserver-client-ca")
logger.Infof("OK!\n")
exutil.By("Check that the ControllerConfig resource is storing the right rootCAData information")
rootCADataCM := NewNamespacedResource(oc.AsAdmin(), "ConfigMap", "kube-system", "root-ca")
rootCAData := rootCADataCM.GetOrFail(`{.data.ca\.crt}`)
ccRootCAData, err := cc.GetRootCAData()
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the rootCAData information from the ControllerConfig")
// We write the Expect command so that the certificates are not printed in case of failure
o.Expect(strings.Trim(ccRootCAData, "\n") == strings.Trim(rootCAData, "\n")).To(o.BeTrue(),
"The value of rootCAData in the ControllerConfig does not equal the value of configmap -n kube-system root-ca")
logger.Infof("OK!\n")
exutil.By("Check the information from the KubeAPIServerServingCAData certificates")
ccKCertsInfo, err := cc.GetCertificatesInfoByBundleFileName("KubeAPIServerServingCAData")
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the controller config information for KubeAPIServerServingCAData certificates")
kubeAPIServerCertsInfo, err := GetCertificatesInfoFromPemBundle("KubeAPIServerServingCAData", []byte(ccKubeAPIServerClientCA))
o.Expect(err).NotTo(o.HaveOccurred(),
"Error extracting certificate info from KubeAPIServerServingCAData pem bundle")
o.Expect(kubeAPIServerCertsInfo).To(o.Equal(ccKCertsInfo),
"The ControllerConfig is not reporting the right information about the certificates in KubeAPIServerServingCAData bundle")
logger.Infof("OK!\n")
exutil.By("Check the information from the rootCAData certificates")
ccRCertsInfo, err := cc.GetCertificatesInfoByBundleFileName("RootCAData")
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the controller config information for rootCAData certificates")
rootCACertsInfo, err := GetCertificatesInfoFromPemBundle("RootCAData", []byte(ccRootCAData))
o.Expect(err).NotTo(o.HaveOccurred(),
"Error extracting certificate info from RootCAData pem bundle")
o.Expect(rootCACertsInfo).To(o.Equal(ccRCertsInfo),
"The ControllerConfig is not reporting the right information about the certificates in rootCAData pem bundle")
logger.Infof("OK!\n")
exutil.By("Check that MCPs are reporting information regarding kubeapiserverserviccadata certificates")
certsExpiry, err := mMcp.GetCertsExpiry()
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the certificates expiry information from master MCP")
o.Expect(certsExpiry).To(o.HaveLen(len(ccKCertsInfo)),
"The expry certs info reported in master MCP has len %d, but the list of kubeAPIServer certs has len %d.\nExpiry:%s\nKubeAPIServer:%s",
len(certsExpiry), len(ccKCertsInfo), certsExpiry, ccKCertsInfo)
for i, certInfo := range ccKCertsInfo {
certExpry := certsExpiry[i]
logger.Infof("%s", certExpry)
o.Expect(certExpry).To(ogs.MatchAllFields(ogs.Fields{
"Bundle": o.Equal(certInfo.BundleFile),
// Date fields have been temporarily removed by devs: https://github.com/openshift/machine-config-operator/pull/3866
"Expiry": o.Equal(certInfo.NotAfter),
"Subject": o.Equal(certInfo.Subject),
}),
"Exipirty information does not match the information repoted in the ControllerConfig")
}
logger.Infof("OK!\n")
exutil.By("Check that the description of ControllerConfig includes the certificates info")
ccDesc, err := cc.Describe()
o.Expect(err).NotTo(o.HaveOccurred(),
"Error describing the ControllerConfig resource")
o.Expect(ccDesc).To(o.And(
o.ContainSubstring("Controller Certificates:"),
o.ContainSubstring("Bundle File"),
// Date fields have been temporarily removed by devs: https://github.com/openshift/machine-config-operator/pull/3866
o.ContainSubstring("Not After"),
o.ContainSubstring("Not Before"),
o.ContainSubstring("Signer"),
o.ContainSubstring("Subject"),
),
"The ControllerConfig description should include information about the certificate, but it does not:\n%s", ccDesc)
logger.Infof("OK!\n")
exutil.By("Check that the description of MCP includes the certificates info")
mMcpDesc, err := mMcp.Describe()
o.Expect(err).NotTo(o.HaveOccurred(),
"Error describing the master MCP resource")
o.Expect(mMcpDesc).To(o.And(
o.ContainSubstring("Cert Expirys"),
o.ContainSubstring("Bundle"),
// Date fields have been temporarily removed by devs: https://github.com/openshift/machine-config-operator/pull/3866
o.ContainSubstring("Expiry"),
),
"The master MCP description should include information about the certificate, but it does not:\n%s", ccDesc)
logger.Infof("OK!\n")
})
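GetCertificatesInfoFromPemBundle is a project helper; a standalone approximation with encoding/pem and crypto/x509 could look like the sketch below (the certInfo struct is an illustrative subset, not the project's type).

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"time"
)

// certInfo holds a subset of the fields the ControllerConfig surfaces per certificate.
type certInfo struct {
	BundleFile string
	Subject    string
	NotBefore  time.Time
	NotAfter   time.Time
}

// infoFromPemBundle parses every CERTIFICATE block in a PEM bundle and collects its metadata.
func infoFromPemBundle(bundleFile string, bundle []byte) ([]certInfo, error) {
	var infos []certInfo
	rest := bundle
	for {
		var block *pem.Block
		block, rest = pem.Decode(rest)
		if block == nil {
			break
		}
		if block.Type != "CERTIFICATE" {
			continue
		}
		cert, err := x509.ParseCertificate(block.Bytes)
		if err != nil {
			return nil, err
		}
		infos = append(infos, certInfo{bundleFile, cert.Subject.String(), cert.NotBefore, cert.NotAfter})
	}
	return infos, nil
}

func main() {
	// Placeholder bundle; feed it the real KubeAPIServerServingCAData or RootCAData content.
	infos, err := infoFromPemBundle("RootCAData", []byte("-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----"))
	fmt.Println(infos, err)
}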
| |||||
test case
|
openshift/openshift-tests-private
|
cf212fd0-e261-44f9-a49d-9a887c6d9e8a
|
Author:sregidor-NonHyperShiftHOST-Low-66046-[P1][OnCLayer] Check image registry certificates
|
['"fmt"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonHyperShiftHOST-Low-66046-[P1][OnCLayer] Check image registry certificates", func() {
if !IsCapabilityEnabled(oc, "ImageRegistry") {
g.Skip("ImageRegistry is not installed, skip this test")
}
var (
mcp = GetCompactCompatiblePool(oc.AsAdmin())
node = mcp.GetNodesOrFail()[0]
cc = NewControllerConfig(oc.AsAdmin(), "machine-config-controller")
)
imageRegistryCerts, err := GetImageRegistryCertificates(oc.AsAdmin())
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the image registry certificates")
for certFile, certValue := range imageRegistryCerts {
logger.Infof("Checking Certfile: %s", certFile)
exutil.By(fmt.Sprintf("Check that the ControllerConfig resource has the right value for bundle file %s", certFile))
ccImageRegistryBundle, err := cc.GetImageRegistryBundleDataByFileName(certFile)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the image registry bundle in file %s in the ControllerConfig resource",
certFile)
o.Expect(ccImageRegistryBundle == certValue).To(o.BeTrue(),
"The ControllerConfig resource does not have the right value for the image registry bundle %s",
certFile)
logger.Infof("OK!\n")
exutil.By(fmt.Sprintf("Check that the ControllerConfig resource reports the right information about bundle file %s", certFile))
certInfo, err := GetCertificatesInfoFromPemBundle(certFile, []byte(certValue))
o.Expect(err).NotTo(o.HaveOccurred(),
"Error extracting certificate info from %s pem bundle", certFile)
ccCertInfo, err := cc.GetCertificatesInfoByBundleFileName(certFile)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the controller config information for %s certificates", certFile)
o.Expect(certInfo).To(o.Equal(ccCertInfo),
"The ControllerConfig is not reporting the right information about the certificates in %s bundle",
certFile)
logger.Infof("OK!\n")
exutil.By(fmt.Sprintf("Check that the file %s has been added to the managed merged trusted image registry configmap", certFile))
o.Eventually(GetManagedMergedTrustedImageRegistryCertificates, "20s", "10s").WithArguments(oc.AsAdmin()).Should(o.HaveKey(certFile),
"The certificate for file %s has not been included in the configmap merged-trusted-image-registry-ca -n openshift-config-managed")
mmtImageRegistryCert, err := GetManagedMergedTrustedImageRegistryCertificates(oc.AsAdmin())
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting managed merged trusted image registry certificates values")
o.Expect(mmtImageRegistryCert[certFile] == certValue).To(o.BeTrue(),
"The certificate in file %s was added to configmap merged-trusted-image-registry-ca -n openshift-config-managed but it has the wrong content")
logger.Infof("OK!\n")
exutil.By(fmt.Sprintf("Check that the file %s has been added nodes", certFile))
// the filename stored in configmap uses "..", but it is translated to ":" in the node.
// so we replace the ".." with ":"
decodedFileName := strings.ReplaceAll(certFile, "..", ":")
remotePath := ImageRegistryCertificatesDir + "/" + decodedFileName + "/" + ImageRegistryCertificatesFileName
rfCert := NewRemoteFile(node, remotePath)
o.Eventually(func(gm o.Gomega) { // Passing o.Gomega as parameter we can use assertions inside the Eventually function without breaking the retries.
gm.Expect(rfCert.Fetch()).To(o.Succeed(),
"Cannot read the certificate file %s in node:%s ", rfCert.fullPath, node.GetName())
gm.Expect(rfCert.GetTextContent() == certValue).To(o.BeTrue(),
"the certificate stored in file %s does not match the expected value", rfCert.fullPath)
}, "1m", "10s").
Should(o.Succeed(),
"The file %s in node %s does not contain the right certificate.", rfCert.GetFullPath(), node.GetName())
logger.Infof("OK!\n")
}
// If there is no certificate configured, we check that the ControllerConfig has empty data
if len(imageRegistryCerts) == 0 {
exutil.By("No certificates configured. Check that ControllerConfig has empty certificates too")
o.Eventually(cc.GetImageRegistryBundleData, "30s", "10s").Should(o.BeEmpty(),
"There are no certificates configured in 'image-registry-ca' configmap but ControllerConfig is not showing empty data")
logger.Infof("OK!\n")
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
46d7ca7a-4230-43a2-8d7d-7ab6bb822934
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Medium-64833-[P2][OnCLayer] Do not make an 'orig' copy for config.json file [Serial]
|
['"encoding/json"', '"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Medium-64833-[P2][OnCLayer] Do not make an 'orig' copy for config.json file [Serial]", func() {
var (
mMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)
wMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
configJSONFile = "/var/lib/kubelet/config.json"
configJSONOringFile = "/etc/machine-config-daemon/orig/var/lib/kubelet/config.json.mcdorig"
)
for _, node := range append(wMcp.GetNodesOrFail(), mMcp.GetNodesOrFail()...) {
exutil.By(fmt.Sprintf("Check that the /var/lib/kubelet/config.json is preset in node %s", node.GetName()))
configJSONRemoteFile := NewRemoteFile(node, configJSONFile)
configJSONOringRemoteFile := NewRemoteFile(node, configJSONOringFile)
o.Eventually(configJSONRemoteFile.Exists, "20s", "2s").Should(o.BeTrue(),
"The file %s does not exist in node %s", configJSONRemoteFile.GetFullPath(), node.GetName())
logger.Infof("OK!\n")
exutil.By(fmt.Sprintf("Check that the /etc/machine-config-daemon/orig/var/lib/kubelet/config.json.mcdorig is NOT preset in node %s",
node.GetName()))
o.Eventually(configJSONOringRemoteFile.Exists, "20s", "2s").Should(o.BeFalse(),
"The file %s exists in node %s, but it should NOT", configJSONOringRemoteFile.GetFullPath(), node.GetName())
logger.Infof("OK!\n")
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
0cb1f77f-754c-415b-8b2e-ced5e694cd08
|
Author:sregidor-NonPreRelease-Longduration-High-67788-kernel type 64k-pages is not supported on non-arm64 nodes [Disruptive]
|
['"github.com/openshift/openshift-tests-private/test/extended/util/architecture"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonPreRelease-Longduration-High-67788-kernel type 64k-pages is not supported on non-arm64 nodes [Disruptive]", func() {
var (
mcName = "mco-tc-67788-invalid-64k-pages-kernel"
expectedNDMessage = `64k-pages is only supported for aarch64 architecture"`
expectedNDReason = "1 nodes are reporting degraded status on sync"
)
architecture.SkipArchitectures(oc.AsAdmin(), architecture.ARM64)
mcp := GetPoolWithArchDifferentFromOrFail(oc.AsAdmin(), architecture.ARM64)
exutil.By("Create a MC with invalid 64k-pages kernel")
mc := NewMachineConfig(oc.AsAdmin(), mcName, mcp.GetName()).SetMCOTemplate("set-64k-pages-kernel.yaml")
mc.skipWaitForMcp = true
validateMcpNodeDegraded(mc, mcp, expectedNDMessage, expectedNDReason, false)
})
| |||||
test case
|
openshift/openshift-tests-private
|
7f55bdd7-b788-4798-8ece-6c1413e9ff9f
|
Author:rioliu-NonPreRelease-Critical-68695-[P1] Machine-Config-Operator should not be degraded when image-registry is not installed [Serial]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:rioliu-NonPreRelease-Critical-68695-[P1] Machine-Config-Operator should not be degraded when image-registry is not installed [Serial]", func() {
// for cluster setup, we need to use upi-on-aws + baselineCapabilitySet: None, because ipi needs known capability `machine-api`
exutil.By("check whether capability ImageRegistry is enabled, if yes, skip the test")
if IsCapabilityEnabled(oc, "ImageRegistry") {
g.Skip("image registry is installed, skip this test")
}
exutil.By("check operator status, it should not be degraded")
mco := NewResource(oc.AsAdmin(), "co", "machine-config")
o.Expect(mco).ShouldNot(BeDegraded(),
"co/machine-config Degraded condition status is not the expected one: %s", mco.GetConditionByType("Degraded"))
})
| ||||||
test case
|
openshift/openshift-tests-private
|
fd21d139-ebc8-4b78-ae68-cd21e089b086
|
Author:rioliu-NonPreRelease-High-68687-[P2][OnCLayer] HostToContainer propagation in MCD [Serial]
|
['"fmt"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:rioliu-NonPreRelease-High-68687-[P2][OnCLayer] HostToContainer propagation in MCD [Serial]", func() {
platform := exutil.CheckPlatform(oc)
assertFunc := func(gm o.Gomega, mountPropagations string) {
logger.Infof("mountPropagations:\n %s", mountPropagations)
for _, mp := range strings.Split(mountPropagations, " ") {
gm.Expect(mp).Should(o.Equal("HostToContainer"), "mountPropagation value is not expected [%s]", mp)
}
}
exutil.By("Check mountPropagation for the pods under mco namespace")
mountPropagations := NewNamespacedResourceList(oc, "pod", MachineConfigNamespace).GetOrFail(`{.items[*].spec.containers[*].volumeMounts[?(@.mountPath=="/rootfs")].mountPropagation}`)
o.Eventually(assertFunc).WithArguments(mountPropagations).Should(o.Succeed())
if ns, ok := OnPremPlatforms[platform]; ok {
exutil.By(fmt.Sprintf("Check mountPropagation for the pods on platform %s", platform))
mountPropagations = NewNamespacedResourceList(oc, "pod", ns).GetOrFail(`{.items[*].spec.containers[*].volumeMounts[*].mountPropagation}`)
o.Eventually(assertFunc).WithArguments(mountPropagations).Should(o.Succeed())
}
if platform == GCPPlatform || platform == AzurePlatform || platform == AlibabaCloudPlatform {
exutil.By("Check mountPropagation for the apiserver-watcher pods under openshift-kube-apiserver namespace")
pods, err := NewNamespacedResourceList(oc, "pod", "openshift-kube-apiserver").GetAll()
o.Expect(err).NotTo(o.HaveOccurred(), "Get pod list under ns/openshift-kube-apiserver failed")
for _, pod := range pods {
if strings.HasPrefix(pod.GetName(), "apiserver-watcher") {
mountPropagations = pod.GetOrFail(`{.spec.containers[*].volumeMounts[?(@.mountPath=="/rootfs")].mountPropagation}`)
o.Eventually(assertFunc).WithArguments(mountPropagations).Should(o.Succeed())
}
}
}
})
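For context, a sketch of the volumeMount shape the assertions above expect on /rootfs mounts, built with the k8s.io/api/core/v1 types (requires that module; the mount name is illustrative).

package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// The volumeMount shape the jsonpath assertions look for on /rootfs mounts.
	prop := corev1.MountPropagationHostToContainer
	vm := corev1.VolumeMount{
		Name:             "rootfs",
		MountPath:        "/rootfs",
		MountPropagation: &prop,
	}
	out, _ := json.MarshalIndent(vm, "", "  ")
	fmt.Println(string(out)) // includes "mountPropagation": "HostToContainer"
}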
| |||||
test case
|
openshift/openshift-tests-private
|
c11452a7-57ae-4477-a776-3ef1a88c9abd
|
Author:sregidor-Longduration-NonPreRelease-Critical-67790-create MC with extensions, 64k-pages kernel type and kernel argument [Disruptive]
|
['"fmt"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-Longduration-NonPreRelease-Critical-67790-create MC with extensions, 64k-pages kernel type and kernel argument [Disruptive]", func() {
architecture.SkipIfNoNodeWithArchitectures(oc.AsAdmin(), architecture.ARM64)
clusterinfra.SkipTestIfNotSupportedPlatform(oc.AsAdmin(), clusterinfra.GCP)
// If arm64 Compact/SNO we use master
// Else if possible we create a custom MCP if there are arm64 nodes in the worker pool
// Else if possible we use the first existing custom MCP with all its nodes using arm64
// Else if master is arm64 we use master
// Else we fail the test
createdCustomPoolName := fmt.Sprintf("mco-test-%s", architecture.ARM64)
defer DeleteCustomMCP(oc.AsAdmin(), createdCustomPoolName)
mcp, nodes := GetPoolAndNodesForArchitectureOrFail(oc.AsAdmin(), createdCustomPoolName, architecture.ARM64, 1)
node := nodes[0]
logger.Infof("Using node %s from pool %s", node.GetName(), mcp.GetName())
exutil.By("Create new MC to add the kernel arguments, kernel type and extension")
mcName := "change-worker-karg-ktype-extension"
mcTemplate := mcName + ".yaml"
mc := NewMachineConfig(oc.AsAdmin(), mcName, mcp.GetName()).SetMCOTemplate(mcTemplate)
mc.parameters = []string{"KERNELTYPE=64k-pages"}
defer mc.delete()
mc.create()
exutil.By("Check kernel arguments, kernel type and extension on the created machine config")
o.Expect(
getMachineConfigDetails(oc, mc.name),
).Should(
o.And(
o.ContainSubstring("usbguard"),
o.ContainSubstring("z=10"),
o.ContainSubstring("64k-pages"),
),
"The new MC is not using the expected configuration")
logger.Infof("OK!\n")
exutil.By("Check kernel type")
o.Expect(node.Is64kPagesKernel()).To(o.BeTrue(),
"The installed kernel is not the expected one")
o.Expect(
node.RpmIsInstalled("kernel-64k-core", "kernel-64k-modules-core", "kernel-64k-modules-extra", "kernel-64k-modules"),
).Should(o.BeTrue(),
"The installed kernel rpm packages are not the expected ones")
logger.Infof("OK!\n")
exutil.By("Check installed extensions")
o.Expect(
node.RpmIsInstalled("usbguard"),
).Should(
o.BeTrue(),
"The usbguard extension rpm is not installed")
logger.Infof("OK!\n")
exutil.By("Check kernel arguments")
o.Expect(node.IsKernelArgEnabled("z=10")).To(o.BeTrue(),
"The kernel arguments are not the expected ones")
logger.Infof("OK!\n")
})
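IsKernelArgEnabled is a project helper; conceptually it boils down to inspecting the booted kernel command line, roughly as in this standard-library sketch (run on the node itself, not from the test runner).

package main

import (
	"fmt"
	"os"
	"strings"
)

// kernelArgEnabled reports whether the running kernel was booted with the given argument.
func kernelArgEnabled(arg string) (bool, error) {
	data, err := os.ReadFile("/proc/cmdline")
	if err != nil {
		return false, err
	}
	for _, field := range strings.Fields(string(data)) {
		if field == arg {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := kernelArgEnabled("z=10")
	fmt.Println(ok, err)
}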
| |||||
test case
|
openshift/openshift-tests-private
|
2f22a82c-6c5d-4177-9b8a-646bee8538b5
|
Author:rioliu-NonPreRelease-Medium-68688-[OnCLayer] kubeconfig must have 600 permissions in all nodes [Serial]
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:rioliu-NonPreRelease-Medium-68688-[OnCLayer] kubeconfig must have 600 permissions in all nodes [Serial]", func() {
var (
filePath = "/etc/kubernetes/kubeconfig"
)
exutil.By(fmt.Sprintf("Check file permission of %s on all nodes, 0600 is expected", filePath))
nodes, err := NewNodeList(oc.AsAdmin()).GetAllLinux()
o.Expect(err).NotTo(o.HaveOccurred(), "Get all cluster nodes failed")
for _, node := range nodes {
logger.Infof("Checking file permission of %s on node %s", filePath, node.GetName())
file := NewRemoteFile(node, filePath)
o.Expect(file.Stat()).NotTo(o.HaveOccurred(), "stat cmd failed on node %s", node.GetName())
o.Expect(file.GetNpermissions()).Should(o.Equal("0600"), "file permission %s is not the expected one", file.GetNpermissions())
logger.Infof("File permission is expected")
}
})
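A minimal local sketch of the 0600 check using only os.Stat; on a real cluster node the test runs the equivalent through a debug pod rather than directly.

package main

import (
	"fmt"
	"io/fs"
	"os"
)

// hasMode reports whether a file's permission bits match exactly the expected mode (e.g. 0600).
func hasMode(path string, want fs.FileMode) (bool, error) {
	info, err := os.Stat(path)
	if err != nil {
		return false, err
	}
	return info.Mode().Perm() == want, nil
}

func main() {
	// Path is the one validated above.
	ok, err := hasMode("/etc/kubernetes/kubeconfig", 0o600)
	fmt.Println(ok, err)
}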
| |||||
test case
|
openshift/openshift-tests-private
|
8ca2d394-58d5-481f-bdf0-b4ddba6d76b9
|
Author:sregidor-NonPreRelease-Critical-69091-[P1][OnCLayer] Machine-Config-Operator skips reboot when configuration matches during node bootstrap pivot [Serial]
|
['bootstrap "github.com/openshift/openshift-tests-private/test/extended/util/bootstrap"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonPreRelease-Critical-69091-[P1][OnCLayer] Machine-Config-Operator skips reboot when configuration matches during node bootstrap pivot [Serial]", func() {
var (
MachineConfigDaemonFirstbootService = "machine-config-daemon-firstboot.service"
)
if !IsInstalledWithAssistedInstallerOrFail(oc.AsAdmin()) {
g.Skip("This test can only be executed in clusters installed with assisted-installer. This cluster was not installed using assisted-installer.")
}
exutil.By("Check that the first reboot is skipped")
coreOsNode := NewNodeList(oc.AsAdmin()).GetAllCoreOsNodesOrFail()[0]
logger.Infof("Using node %s", coreOsNode.GetName())
o.Eventually(coreOsNode.GetJournalLogs, "30s", "10s").WithArguments("-u", MachineConfigDaemonFirstbootService).
Should(o.And(
o.ContainSubstring("Starting Machine Config Daemon Firstboot"),
o.Not(o.ContainSubstring(`Changes queued for next boot. Run "systemctl reboot" to start a reboot`)),
o.Not(o.ContainSubstring(`initiating reboot`)),
),
"The %s service should have skipped the first reboot, but it didn't", MachineConfigDaemonFirstbootService)
exutil.By("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
82c4e1ad-8266-408f-b12d-9cf527bbb8a7
|
Author:sregidor-NonPreRelease-High-68736-[P2][OnCLayer] machine config server supports bootstrap with IR certs [Serial]
|
['"fmt"', 'bootstrap "github.com/openshift/openshift-tests-private/test/extended/util/bootstrap"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonPreRelease-High-68736-[P2][OnCLayer] machine config server supports bootstrap with IR certs [Serial]", func() {
var (
mcsBinary = "/usr/bin/machine-config-server"
bootstrapSubCmd = "bootstrap"
expectedBootstrapHelp = "--bootstrap-certs stringArray a certificate bundle formatted in a string array with the format key=value,key=value"
controllerPodName, err = NewController(oc.AsAdmin()).GetPodName()
)
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the MCO controller pod to check the bootstrap-certs flag in machine-config-server")
exutil.By(fmt.Sprintf("Check that the bootstrap-certs flag is present in the command: %s %s -h", mcsBinary, bootstrapSubCmd))
o.Eventually(exutil.RemoteShPod, "2m", "20s").
WithArguments(oc.AsAdmin(), MachineConfigNamespace, controllerPodName, mcsBinary, bootstrapSubCmd, "-h").
Should(o.ContainSubstring(expectedBootstrapHelp),
"The --bootstrap-certs flag is not available in the machine-config-server binary")
exutil.By("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
9621f7ee-b8ef-4ce6-bf19-1015792abf14
|
Author:sregidor-NonPreRelease-High-68682-[OnCLayer] daemon should not pull baremetalRuntimeCfg every time [Serial]
|
['"fmt"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonPreRelease-High-68682-[OnCLayer] daemon should not pull baremetalRuntimeCfg every time [Serial]", func() {
SkipIfNotOnPremPlatform(oc.AsAdmin())
resolvPrependerService := "on-prem-resolv-prepender.service"
nodes, err := NewNodeList(oc.AsAdmin()).GetAllLinux()
o.Expect(err).NotTo(o.HaveOccurred(),
"Could not get the list of Linux nodes")
for _, node := range nodes {
exutil.By(fmt.Sprintf("Check %s in node %s", resolvPrependerService, node.GetName()))
o.Eventually(node.GetJournalLogs, "5s", "1s").WithArguments("-u", resolvPrependerService).Should(
o.ContainSubstring("Image exists, no need to download"),
"%s should not try to download images more than once. Check OCPBUGS-18772.", resolvPrependerService,
)
logger.Infof("OK!\n")
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
e3a90044-6bef-4379-babd-6f3eec41f220
|
Author:sregidor-NonPreRelease-Medium-68686-[P1][OnCLayer] MCD No invalid memory address or nil pointer dereference when kubeconfig file is not present in a node [Disruptive]
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonPreRelease-Medium-68686-[P1][OnCLayer] MCD No invalid memory address or nil pointer dereference when kubeconfig file is not present in a node [Disruptive]", func() {
var (
node = GetCompactCompatiblePool(oc.AsAdmin()).GetNodesOrFail()[0]
kubeconfig = "/etc/kubernetes/kubeconfig"
kubeconfigBack = kubeconfig + ".back"
)
logger.Infof("Using node %s for testing", node.GetName())
defer func() {
logger.Infof("Starting defer logic")
_, err := node.DebugNodeWithChroot("mv", kubeconfigBack, kubeconfig)
if err != nil {
logger.Errorf("Error restoring the original kubeconfigfile: %s", err)
}
err = NewNamespacedResource(oc.AsAdmin(), "pod", MachineConfigNamespace, node.GetMachineConfigDaemon()).Delete()
if err != nil {
logger.Errorf("Error deleting the MCD pod to restore the original kubeconfigfile: %s", err)
}
exutil.AssertAllPodsToBeReady(oc.AsAdmin(), MachineConfigNamespace)
logger.Infof("Defer logic finished")
}()
exutil.By(fmt.Sprintf("Remove the %s file", kubeconfig))
_, err := node.DebugNodeWithChroot("mv", kubeconfig, kubeconfigBack)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error removing the file %s from node %s", kubeconfig, node.GetName())
logger.Infof("File %s was moved to %s", kubeconfig, kubeconfigBack)
logger.Infof("OK!\n")
exutil.By("Remove the MCDs pod")
mcdPodName := node.GetMachineConfigDaemon()
mcdPod := NewNamespacedResource(oc.AsAdmin(), "pod", MachineConfigNamespace, mcdPodName)
o.Expect(
mcdPod.Delete(),
).To(
o.Succeed(),
"Error deleting the MCD pod %s for node %s", mcdPod.GetName(), node.GetName())
logger.Infof("OK!\n")
exutil.By("Check that the pod failed but did not panic")
logger.Infof("Check that the pod is failing")
o.Eventually(
node.GetMachineConfigDaemon, "2m", "10s",
).ShouldNot(
o.Equal(mcdPodName),
"A new MCD pod should be created after removing the old one, but no new MCD pod was created")
mcdPod = NewNamespacedResource(oc.AsAdmin(), "pod", MachineConfigNamespace, node.GetMachineConfigDaemon())
o.Eventually(
mcdPod.Get, "2m", "10s",
).WithArguments(`{.status.containerStatuses[?(@.name=="machine-config-daemon")].state.terminated}`).ShouldNot(o.Or(
o.BeEmpty(),
o.ContainSubstring("panic:"),
), "The new MCD pod should fail without panic because the file %s is not available", kubeconfig)
logger.Infof("Check pod logs to make sure that it did not panic")
o.Consistently(
node.GetMCDaemonLogs, "1m", "20s",
).WithArguments("").ShouldNot(
o.ContainSubstring("panic:"),
"The new MCD pod should not panic")
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
e8e9b486-3d5b-4d1d-b9b3-4f089cbb9529
|
Author:sregidor-NonPreRelease-Medium-68684-[P2][OnCLayer] machine-config-controller pod restart should not make nodes unschedulable [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonPreRelease-Medium-68684-[P2][OnCLayer] machine-config-controller pod restart should not make nodes unschedulable [Disruptive]", func() {
var (
controller = NewController(oc.AsAdmin())
masterNode = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster).GetNodesOrFail()[0]
)
exutil.By("Check that nodes are not modified when the controller pod is removed")
labels, err := masterNode.Get(`{.metadata.labels}`)
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the labels in node %s", masterNode.GetName())
masterNode.oc.NotShowInfo() // avoid spamming the logs
o.Consistently(func(gm o.Gomega) { // Passing o.Gomega as parameter we can use assertions inside the Consistently function without breaking the retries.
logger.Infof("Remove controller pod")
gm.Expect(controller.RemovePod()).To(o.Succeed(), "Could not remove the controller pod")
logger.Infof("Check that the node was not modified")
gm.Consistently(func(gm o.Gomega) {
gm.Expect(masterNode.Get(`{.metadata.labels}`)).To(o.MatchJSON(labels),
"Labels in node %s have changed after removing the controller pod, and they should not change", masterNode.GetName())
gm.Expect(masterNode.IsCordoned()).To(o.BeFalse(),
"The node %s was cordoned after removing the controller pod. Node: \n%s",
masterNode.GetName(), masterNode.PrettyString())
}, "10s", "0s").
Should(o.Succeed(),
"The node %s was modified when the controller pod was removed")
}, "4m", "1s").
Should(o.Succeed(),
"When we remove the controller pod the node %s is modified")
logger.Infof("OK!\n")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
8d502c83-5dce-4cfc-b5a2-342f0a0b1010
|
Author:sregidor-NonPreRelease-Medium-68797-Custom pool configs take priority over worker configs [Disruptive]
|
['"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonPreRelease-Medium-68797-Custom pool configs take priority over worker configs [Disruptive]", func() {
if IsCompactOrSNOCluster(oc.AsAdmin()) {
g.Skip("The cluster is SNO/Compact. This test cannot be executed in SNO/Compact clusters")
}
var (
kubeletConfPath = "/etc/kubernetes/kubelet.conf"
wMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
createdCustomPoolName = "mco-test-68797"
kcTemplate = generateTemplateAbsolutePath("generic-kubelet-config.yaml")
workerKcName = "worker-tc-68797-kubeburst"
workerKubeletConfig = `{"kubeAPIBurst": 7000}`
infraFirstKcName = "infra-first-tc-68797-kubeburst"
infraFirstKubeletConfig = `{"kubeAPIBurst": 8000}`
infraSecondKcName = "infra-second-tc-68797-kubeburst"
infraSeconddKubeletConfig = `{"kubeAPIBurst": 9000}`
)
// In DeleteCustomMCP deferred function, when we delete a MCP, we wait first for the worker MCP to be updated.
// No need to defer the worker MCP WaitForComplete logic.
defer DeleteCustomMCP(oc.AsAdmin(), createdCustomPoolName)
infraMcp, err := CreateCustomMCP(oc.AsAdmin(), createdCustomPoolName, 1)
o.Expect(err).NotTo(o.HaveOccurred(), "Could not create a new custom MCP")
exutil.By("Create Kubelet Configurations")
logger.Infof("Create worker KubeletConfig")
wKc := NewKubeletConfig(oc.AsAdmin(), workerKcName, kcTemplate)
defer wKc.Delete()
wKc.create("KUBELETCONFIG="+workerKubeletConfig, "POOL="+wMcp.GetName())
exutil.By("Wait for configurations to be applied in worker pool")
wMcp.waitForComplete()
infraMcp.waitForComplete()
logger.Infof("OK!\n")
logger.Infof("Create first infra KubeletConfig")
infraFirstKc := NewKubeletConfig(oc.AsAdmin(), infraFirstKcName, kcTemplate)
defer infraFirstKc.Delete()
infraFirstKc.create("KUBELETCONFIG="+infraFirstKubeletConfig, "POOL="+infraMcp.GetName())
logger.Infof("Create second infra KubeletConfig")
infraSecondKc := NewKubeletConfig(oc.AsAdmin(), infraSecondKcName, kcTemplate)
defer infraSecondKc.Delete()
infraSecondKc.create("KUBELETCONFIG="+infraSeconddKubeletConfig, "POOL="+infraMcp.GetName())
logger.Infof("OK!\n")
exutil.By("Wait for configurations to be applied in custom pool")
infraMcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Check kubelet configuration in worker pool")
o.Expect(
NewRemoteFile(wMcp.GetNodesOrFail()[0], kubeletConfPath).Read(),
).To(
HaveContent(o.Or(o.ContainSubstring(`"kubeAPIBurst": 7000`), o.ContainSubstring(`kubeAPIBurst: 7000`))),
)
logger.Infof("OK!\n")
exutil.By("Check kubelet configuration in infra pool")
o.Expect(
NewRemoteFile(infraMcp.GetNodesOrFail()[0], kubeletConfPath).Read(),
).To(o.And(
HaveContent(o.Or(o.ContainSubstring(`"kubeAPIBurst": 9000`), o.ContainSubstring(`kubeAPIBurst: 9000`))),
o.Not(HaveContent(o.Or(o.ContainSubstring(`"kubeAPIBurst": 8000`), o.ContainSubstring(`kubeAPIBurst: 8000`)))),
))
logger.Infof("OK!\n")
})
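To illustrate how the winning KubeletConfig could be confirmed programmatically, a sketch that extracts kubeAPIBurst from a JSON-rendered kubelet.conf; the sample content is trimmed and illustrative (the file may also be rendered as YAML, which this sketch does not handle).

package main

import (
	"encoding/json"
	"fmt"
)

// kubeAPIBurst extracts the kubeAPIBurst value from a JSON-rendered kubelet.conf.
func kubeAPIBurst(kubeletConf []byte) (int, error) {
	var cfg struct {
		KubeAPIBurst int `json:"kubeAPIBurst"`
	}
	if err := json.Unmarshal(kubeletConf, &cfg); err != nil {
		return 0, err
	}
	return cfg.KubeAPIBurst, nil
}

func main() {
	// Trimmed, illustrative content; the real file carries the full KubeletConfiguration.
	conf := []byte(`{"kind":"KubeletConfiguration","kubeAPIBurst":9000}`)
	burst, err := kubeAPIBurst(conf)
	fmt.Println(burst, err) // 9000 <nil> -> the last infra KubeletConfig took priority
}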
| |||||
test case
|
openshift/openshift-tests-private
|
0c5c8a30-d276-4ff0-bc66-806fa308ef10
|
Author:rioliu-NonHyperShiftHOST-Critical-70090-[P1][OnCLayer] apiserver-url.env file can be created on all cluster nodes [Serial]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:rioliu-NonHyperShiftHOST-Critical-70090-[P1][OnCLayer] apiserver-url.env file can be created on all cluster nodes [Serial]", func() {
exutil.By("Check file apiserver-url.env on all linux nodes")
apiserverURLEnvFile := "/etc/kubernetes/apiserver-url.env"
allNodes, err := NewNodeList(oc.AsAdmin()).GetAllLinux()
o.Expect(err).NotTo(o.HaveOccurred(), "Get all linux nodes failed")
for _, node := range allNodes {
if node.HasTaintEffectOrFail("NoExecute") {
logger.Infof("Node %s is tainted with 'NoExecute'. Validation skipped.", node.GetName())
continue
}
logger.Infof("Check apiserver-url.env file on node %s", node.GetName())
rf := NewRemoteFile(node, apiserverURLEnvFile)
o.Expect(rf.Exists()).Should(o.BeTrue(), "file %s not found on node %s", apiserverURLEnvFile, node.GetName())
logger.Infof("OK\n")
}
})
| ||||||
test case
|
openshift/openshift-tests-private
|
5cf2e2dd-a744-4677-9bdc-856e63101d5f
|
Author:rioliu-NonPreRelease-Longduration-High-70125-[P2][OnCLayer] Test patch annotation way of updating a paused pool [Disruptive]
|
['"fmt"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:rioliu-NonPreRelease-Longduration-High-70125-[P2][OnCLayer] Test patch annotation way of updating a paused pool [Disruptive]", func() {
var (
workerMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
mcName = "create-test-file-70125"
filePath = "/etc/test-file-70125"
fileConfig = getURLEncodedFileConfig(filePath, "test-70125", "420")
)
exutil.By("Pause worker pool")
workerMcp.pause(true)
o.Expect(workerMcp.IsPaused()).Should(o.BeTrue(), "worker pool is not paused")
exutil.By("Create a MC for worker nodes")
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker)
mc.SetMCOTemplate(GenericMCTemplate)
mc.SetParams(fmt.Sprintf("FILES=[%s]", fileConfig))
mc.skipWaitForMcp = true
defer workerMcp.RecoverFromDegraded()
defer mc.delete()
// unpause the mcp first in defer logic, so nodes can be recovered automatically
defer workerMcp.pause(false)
mc.create()
exutil.By("Patch desired MC annotation to trigger update")
// get desired rendered mc from mcp.spec.configuration.name
currentConfig, ccerr := workerMcp.getConfigNameOfStatus()
o.Expect(ccerr).NotTo(o.HaveOccurred(), "Get current MC of worker pool failed")
o.Eventually(workerMcp.getConfigNameOfSpec, "2m", "5s").ShouldNot(o.Equal(currentConfig))
desiredConfig, dcerr := workerMcp.getConfigNameOfSpec()
o.Expect(dcerr).NotTo(o.HaveOccurred(), "Get desired MC of worker pool failed")
o.Expect(desiredConfig).NotTo(o.BeEmpty(), "Cannot get desired MC")
logger.Infof("Desired MC is: %s\n", desiredConfig)
allWorkerNodes := NewNodeList(oc.AsAdmin()).GetAllLinuxWorkerNodesOrFail()
o.Expect(allWorkerNodes).NotTo(o.BeEmpty(), "Cannot get any worker node from worker pool")
workerNode := allWorkerNodes[0]
if exutil.OrFail[bool](workerMcp.IsOCL()) {
logger.Infof("OCL cluster, we need to patch the desiredImage annotation too")
logger.Infof("Start to patch annotations [machineconfiguration.openshift.io/desiredConfig] and [machineconfiguration.openshift.io/desiredImage] for worker node %s", workerNode.GetName())
mosc := exutil.OrFail[*MachineOSConfig](workerMcp.GetMOSC())
var mosb *MachineOSBuild
o.Eventually(func() (string, error) {
var err error
mosb, err = mosc.GetCurrentMachineOSBuild()
if err != nil {
return "", err
}
return mosb.GetMachineConfigName()
}, "5m", "15s").Should(o.Equal(desiredConfig), "The MOSC resource was not updated with the right MOSB")
logger.Infof("Waiting for the image to be built")
o.Eventually(mosb, "20m", "20s").Should(HaveConditionField("Building", "status", FalseString), "Build was not finished")
o.Eventually(mosb, "10m", "20s").Should(HaveConditionField("Succeeded", "status", TrueString), "Build didn't succeed")
desiredImage := exutil.OrFail[string](mosb.GetStatusDigestedImagePullSpec())
o.Eventually(mosc.GetStatusCurrentImagePullSpec, "2m", "10s").Should(o.Equal(desiredImage), "The MOSC resource was not updated")
logger.Infof("Desired Image is: %s\n", desiredImage)
o.Expect(desiredImage).NotTo(o.BeEmpty(), "Cannot get desired image")
workerNode.PatchDesiredConfigAndDesiredImage(desiredConfig, desiredImage)
} else {
logger.Infof("Not OCL cluster, we only patch the desiredConfig annotation")
logger.Infof("Start to patch annotation [machineconfiguration.openshift.io/desiredConfig] for worker node %s", workerNode.GetName())
workerNode.PatchDesiredConfig(desiredConfig)
}
// wait update to complete
o.Eventually(workerNode.IsUpdating, "5m", "5s").Should(o.BeTrue(), "Node is not updating")
o.Eventually(workerNode.IsUpdated, "10m", "10s").Should(o.BeTrue(), "Node is not updated")
o.Eventually(workerMcp.getUpdatedMachineCount, "2m", "15s").Should(o.Equal(1), "The MCP is not properly reporting the updated node")
logger.Infof("Node %s is updated to desired MC %s", workerNode.GetName(), desiredConfig)
exutil.By("Unpause worker pool")
workerMcp.pause(false)
o.Expect(workerMcp.IsPaused()).Should(o.BeFalse(), "worker pool is not unpaused")
logger.Infof("MCP worker is unpaused\n")
exutil.By("Check worker pool is updated")
workerMcp.waitForComplete()
exutil.By("Check file exists on all worker nodes")
for _, node := range allWorkerNodes {
o.Expect(NewRemoteFile(node, filePath).Exists()).Should(o.BeTrue(), "Cannot find expected file %s on node %s", filePath, node.GetName())
logger.Infof("File %s can be found on node %s\n", filePath, node.GetName())
}
})
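PatchDesiredConfig is a project helper built on oc; a client-go sketch of the same merge patch against the node annotation might look like this (kubeconfig path, node name and rendered MC name are placeholders).

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// patchDesiredConfig sets the desiredConfig annotation on a node via a merge patch,
// which is conceptually what the PatchDesiredConfig helper does through oc.
func patchDesiredConfig(client kubernetes.Interface, node, renderedMC string) error {
	patch := fmt.Sprintf(`{"metadata":{"annotations":{"machineconfiguration.openshift.io/desiredConfig":%q}}}`, renderedMC)
	_, err := client.CoreV1().Nodes().Patch(context.TODO(), node, types.MergePatchType, []byte(patch), metav1.PatchOptions{})
	return err
}

func main() {
	// Kubeconfig path, node name and rendered MC name are illustrative.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)
	if err := patchDesiredConfig(client, "worker-0", "rendered-worker-abc123"); err != nil {
		panic(err)
	}
}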
| |||||
test case
|
openshift/openshift-tests-private
|
bb42c097-fd3a-444d-91ca-90fb13ce0325
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-71277-ImageTagMirrorSet. Skip image registry change disruption[Disruptive]
|
['"regexp"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-71277-ImageTagMirrorSet. Skip image registry change disruption[Disruptive]", func() {
var (
itmsName = "tc-71277-tag-mirror-skip-drain"
overrideDrainConfigMapTemplateName = "image-registry-override-drain-configmap.yaml"
overrideDrainConfigMap = NewConfigMap(oc.AsAdmin(), MachineConfigNamespace, "image-registry-override-drain")
mcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)
node = mcp.GetNodesOrFail()[0]
)
// ImageTagMirrorSet is not compatible with ImageContentSourcePolicy.
// If any ImageContentSourcePolicy exists we skip this test case.
skipTestIfImageContentSourcePolicyExists(oc.AsAdmin())
// If techpreview is enabled, then this behaviour is controlled by the new node disruption policy
if exutil.IsTechPreviewNoUpgrade(oc) {
g.Skip("featureSet: TechPreviewNoUpgrade is enabled. This test case cannot be executed with TechPreviewNoUpgrade because this behaviour is contrller by the new node disruption policy")
}
exutil.By("Start capturing events and clean pods logs")
startTime, dErr := node.GetDate()
o.Expect(dErr).ShouldNot(o.HaveOccurred(), "Error getting date in node %s", node.GetName())
o.Expect(node.IgnoreEventsBeforeNow()).NotTo(o.HaveOccurred(),
"Error getting the latest event in node %s", node.GetName())
logger.Infof("Removing all MCD pods to clean the logs.")
o.Expect(RemoveAllMCDPods(oc)).To(o.Succeed(), "Error removing all MCD pods in %s namespace", MachineConfigNamespace)
logger.Infof("OK!\n")
exutil.By("Create image-registry-override-drain configmap")
defer overrideDrainConfigMap.Delete()
o.Expect(
NewMCOTemplate(oc.AsAdmin(), overrideDrainConfigMapTemplateName).Create(),
).To(o.Succeed(),
"Error creating the image-registry-override-drain configmap to override the drain behavior")
logger.Infof("OK!\n")
exutil.By("Create new machine config to deploy a ImageTagMirrorSet configuring a mirror registry")
itms := NewImageTagMirrorSet(oc.AsAdmin(), itmsName, *NewMCOTemplate(oc, "add-image-tag-mirror-set.yaml"))
defer mcp.waitForComplete()
defer itms.Delete()
itms.Create("-p", "NAME="+itmsName)
mcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Check logs to verify that a drain operation was skipped reporting the reason. No reboot happened. Crio was restarted")
o.Expect(
exutil.GetSpecificPodLogs(oc, MachineConfigNamespace, MachineConfigDaemon, node.GetMachineConfigDaemon(), ""),
).Should(o.And(
o.ContainSubstring("Drain was skipped for this image registry update due to the configmap image-registry-override-drain being present. This may not be a safe change"),
o.MatchRegexp(MCDCrioReloadedRegexp)),
"The right actions could not be found in the logs. No drain should happen, no reboot should happen and crio should be restarted")
logger.Infof("OK!\n")
exutil.By("Verify that the node was NOT rebooted")
o.Expect(node.GetUptime()).Should(o.BeTemporally("<", startTime),
"The node %s must NOT be rebooted after applying the configuration, but it was rebooted. Uptime date happened after the start config time.", node.GetName())
logger.Infof("OK!\n")
exutil.By("Check that no drain nor reboot events were triggered")
o.Expect(node.GetEvents()).NotTo(o.Or(
HaveEventsSequence("Drain"),
HaveEventsSequence("Reboot")),
"No Drain and no Reboot events should be triggered")
logger.Infof("OK!\n")
exutil.By("Check that the /etc/containers/registries.conf file was configured")
rf := NewRemoteFile(node, "/etc/containers/registries.conf")
o.Expect(rf.Fetch()).To(o.Succeed(),
"Error getting file /etc/containers/registries.conf")
configRegex := `(?s)` + regexp.QuoteMeta(`[[registry]]`) + ".*" +
regexp.QuoteMeta(`registry.redhat.io/openshift4`) + ".*" +
regexp.QuoteMeta(`[[registry.mirror]]`) + ".*" +
regexp.QuoteMeta(`mirror.example.com/redhat`) + ".*" +
`pull-from-mirror *= *"tag-only"`
o.Expect(rf.GetTextContent()).To(o.MatchRegexp(configRegex),
"The file /etc/containers/registries.conf has not been properly configured with the new mirror information")
logger.Infof("OK!\n")
exutil.By("Delete the ImageDigestMirrorSet resource")
logger.Infof("Removing all MCD pods to clean the logs.")
o.Expect(RemoveAllMCDPods(oc)).To(o.Succeed(), "Error removing all MCD pods in %s namespace", MachineConfigNamespace)
itms.Delete()
mcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Check that the configuration in file /etc/containers/registries.conf was restored")
o.Expect(rf.Fetch()).To(o.Succeed(),
"Error getting file /etc/containers/registries.conf")
o.Expect(rf.GetTextContent()).NotTo(o.ContainSubstring(`example.io/digest-example/ubi-minimal`),
"The configuration in file /etc/containers/registries.conf was not restored after deleting the ImageDigestMirrorSet resource")
logger.Infof("OK!\n")
exutil.By("Check logs to verify that, after deleting the ImageTagMirrorSet, a drain operation was skipped reporting the reason. No reboot happened. Crio was restarted")
o.Expect(
exutil.GetSpecificPodLogs(oc, MachineConfigNamespace, MachineConfigDaemon, node.GetMachineConfigDaemon(), ""),
).Should(o.And(
o.ContainSubstring("Drain was skipped for this image registry update due to the configmap image-registry-override-drain being present. This may not be a safe change"),
o.MatchRegexp(MCDCrioReloadedRegexp)),
"The right actions could not be found in the logs. No drain should happen, no reboot should happen and crio should be restarted")
logger.Infof("OK!\n")
exutil.By("Verify that the node was NOT rebooted")
o.Expect(node.GetUptime()).Should(o.BeTemporally("<", startTime),
"The node %s must NOT be rebooted after applying the configuration, but it was rebooted. Uptime date happened after the start config time.", node.GetName())
logger.Infof("OK!\n")
exutil.By("Check that no drain nor reboot events were triggered")
o.Expect(node.GetEvents()).NotTo(o.Or(
HaveEventsSequence("Drain"),
HaveEventsSequence("Reboot")),
"No Drain and no Reboot events should be triggered")
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
d325f4bf-4f88-4ddd-a908-1d6467882f59
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Critical-72025-[P1][OnCLayer] nmstate keeps service yamls[Disruptive]
|
['"fmt"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Critical-72025-[P1][OnCLayer] nmstate keeps service yamls[Disruptive]", func() {
var (
node = GetCompactCompatiblePool(oc.AsAdmin()).GetNodesOrFail()[0]
nmstateConfigFileFullPath = "/etc/nmstate/mco-tc-72025-basic-nmsconfig.yml"
nmstateConfigFileAppliedFullPath = strings.ReplaceAll(nmstateConfigFileFullPath, ".yml", ".applied")
nmstateConfigRemote = NewRemoteFile(node, nmstateConfigFileFullPath)
nmstateConfigAppliedRemote = NewRemoteFile(node, nmstateConfigFileAppliedFullPath)
nmstateBasicConfig = `
desiredState:
interfaces:
- name: dummytc72025
type: dummy
state: absent
`
)
exutil.By(fmt.Sprintf("Create a config file for nmstate in node %s", node.GetName()))
logger.Infof("Config content:\n%s", nmstateBasicConfig)
defer func() {
nmstateConfigRemote.Rm("-f")
nmstateConfigAppliedRemote.Rm("-f")
logger.Infof("Restarting nmstate service")
_, err := node.DebugNodeWithChroot("systemctl", "restart", "nmstate")
o.Expect(err).NotTo(o.HaveOccurred(), "Error restarting the nsmtate service in node %s", node.GetName())
}()
logger.Infof("Creating the config file")
o.Expect(nmstateConfigRemote.Create([]byte(nmstateBasicConfig), 0o600)).To(o.Succeed(),
"Error creating the basic config file %s in node %s", nmstateConfigFileFullPath, node.GetName())
nmstateConfigRemote.PrintDebugInfo()
logger.Infof("OK!\n")
exutil.By(fmt.Sprintf("Restart nmstate service in node %s", node.GetName()))
_, err := node.DebugNodeWithChroot("systemctl", "restart", "nmstate")
o.Expect(err).NotTo(o.HaveOccurred(), "Error restarting the nsmtate service in node %s", node.GetName())
logger.Infof("OK!\n")
exutil.By("Check that the configuration file was not removed and it was correctly cloned")
o.Expect(nmstateConfigRemote.Exists()).To(o.BeTrue(),
"The configuration file %s does not exist after restarting the nmstate service, but it should exist", nmstateConfigRemote.GetFullPath())
o.Expect(nmstateConfigAppliedRemote.Exists()).To(o.BeTrue(),
"The applied configuration file %s does not exist after restarting the nmstate service, but it should exist", nmstateConfigAppliedRemote.GetFullPath())
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
eaf13077-6820-457b-8b8b-b3e721447403
|
Author:ptalgulk-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-72008-[P2][OnCLayer] recreate currentconfig missing on the filesystem [Disruptive]
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:ptalgulk-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-72008-[P2][OnCLayer] recreate currentconfig missing on the filesystem [Disruptive]", func() {
var (
mcp = GetCompactCompatiblePool(oc.AsAdmin())
mcName = "mco-tc-72008"
node = mcp.GetSortedNodesOrFail()[0]
currentConfigFile = "/etc/machine-config-daemon/currentconfig"
filePath = "/etc/mco-test-case-72008"
fileMode = "420"
fileContent = "test"
fileConfig = getURLEncodedFileConfig(filePath, fileContent, fileMode)
)
exutil.By("Remove the file /etc/machine-config-daemon/currentconfig") // remove the currentconfig file
rmCurrentConfig := NewRemoteFile(node, currentConfigFile)
o.Expect(rmCurrentConfig.Rm()).To(o.Succeed(), "Not able to remove %s", rmCurrentConfig.GetFullPath())
o.Expect(rmCurrentConfig).NotTo(Exist(), "%s was removed but still exists", rmCurrentConfig)
logger.Infof("OK \n")
exutil.By("Create new Machine config") // new Machineconfig file
mc := NewMachineConfig(oc.AsAdmin(), mcName, mcp.GetName())
mc.parameters = []string{fmt.Sprintf("FILES=[%s]", fileConfig)}
defer mc.delete() // clean
mc.create()
logger.Infof("OK \n")
exutil.By("Check that the file /etc/machine-config-daemon/currentconfig is recreated") // After update currentconfig exist
o.Expect(rmCurrentConfig).To(Exist(), "%s does not exist", rmCurrentConfig)
o.Expect(rmCurrentConfig.Read()).NotTo(HaveContent(o.BeEmpty()), "%s should not be empty file", rmCurrentConfig)
logger.Infof("OK \n")
exutil.By("Check Machine-config is applied") // machine-config is applied
newFile := NewRemoteFile(node, filePath)
o.Expect(newFile.Read()).To(o.And(HaveContent(fileContent), HaveOctalPermissions("0644")), "%s does not have the expected content or permissions", newFile)
logger.Infof("OK\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
ace96212-5863-4fd0-a1f3-56b5054a4246
|
Author:ptalgulk-NonHyperShiftHOST-NonPreRelease-High-72007-[OnCLayer] check node update frequencies
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:ptalgulk-NonHyperShiftHOST-NonPreRelease-High-72007-[OnCLayer] check node update frequencies", func() {
exutil.By("To get node and display its nodeupdate frequiences")
var (
file = "/etc/kubernetes/kubelet.conf"
cmd = "nodeStatusUpdateFrequency|nodeStatusReportFrequency"
)
nodeList, err := NewNodeList(oc.AsAdmin()).GetAllLinux() // Get all nodes
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the list of nodes")
for _, node := range nodeList {
if node.HasTaintEffectOrFail("NoExecute") {
logger.Infof("Node %s is tainted with 'NoExecute'. Validation skipped.", node.GetName())
continue
}
nodeUpdate, err := node.DebugNodeWithChroot("grep", "-E", cmd, file) // To get nodeUpdate frequencies value
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting nodeupdate frequencies for %s", node.GetName())
o.Expect(nodeUpdate).To(o.Or(o.ContainSubstring(`"nodeStatusUpdateFrequency": "10s"`), o.ContainSubstring(`nodeStatusUpdateFrequency: 10s`)), "Value for 'nodeStatusUpdateFrequency' is not the same as expected.")
o.Expect(nodeUpdate).To(o.Or(o.ContainSubstring(`"nodeStatusReportFrequency": "5m0s"`), o.ContainSubstring(`nodeStatusReportFrequency: 5m0s`)), "Value for 'nodeStatusReportFrequency' is not the same as expected.")
logger.Infof("node/%s %s", node, nodeUpdate)
}
})
| ||||||
test case
|
openshift/openshift-tests-private
|
da3b3bbe-0e15-4870-8081-21e2a8ee4491
|
Author:ptalgulk-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-73148-[P1][OnCLayer] prune renderedmachineconfigs [Disruptive]
|
['"fmt"', '"regexp"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:ptalgulk-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-73148-[P1][OnCLayer] prune renderedmachineconfigs [Disruptive]", func() {
var (
mcName = "fake-worker-pass-1"
mcList = NewMachineConfigList(oc.AsAdmin())
wMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
mMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)
NewSortedRenderedMCMaster []MachineConfig
matchString string
)
// create machine config
exutil.By("Create a new MachineConfig")
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker)
mc.parameters = []string{fmt.Sprintf(`PWDUSERS=[{"name":"%s", "passwordHash": "%s" }]`, "core", "fake-b")}
defer mc.delete()
mc.create()
mMcp.WaitImmediateForUpdatedStatus()
logger.Infof("OK!\n")
wSpecConf, specErr := wMcp.getConfigNameOfSpec() // get the rendered MC name in the worker MCP spec
o.Expect(specErr).NotTo(o.HaveOccurred())
mSpecConf, specErr := mMcp.getConfigNameOfSpec() // get the rendered MC name in the master MCP spec
o.Expect(specErr).NotTo(o.HaveOccurred())
logger.Infof("%s %s \n", wSpecConf, mSpecConf)
// sort mcList by time and get rendered machine config
mcList.SortByTimestamp()
sortedRenderedMCs := mcList.GetMCPRenderedMachineConfigsOrFail()
logger.Infof(" %s", sortedRenderedMCs)
sortedMCListMaster := mcList.GetRenderedMachineConfigForMasterOrFail() // to get master rendered machine config
// 1 To check for `oc adm prune renderedmachineconfigs` cmd
exutil.By("To run prune cmd to know which rendered machineconfigs would be deleted")
pruneMCOutput, err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("prune", "renderedmachineconfigs").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Cannot get the rendered config for pool")
logger.Infof(pruneMCOutput)
for _, mc := range sortedRenderedMCs {
matchString := "dry-run deleting rendered MachineConfig "
if mc.GetName() == wSpecConf || mc.GetName() == mSpecConf {
matchString = "Skip dry-run deleting rendered MachineConfig "
}
o.Expect(pruneMCOutput).To(o.ContainSubstring(matchString+mc.GetName()), "The %s is not same as in-use renderedMC in MCP", mc.GetName()) // to check correct rendered MC will be deleted or skipped
o.Expect(mc.Exists()).To(o.BeTrue(), "The dry run deleted rendered MC is removed but should exist.")
}
logger.Infof("OK!\n")
// 2 To check for `oc adm prune renderedmachineconfigs --count=1 --pool-name master` cmd
exutil.By("To get the rendered machineconfigs based on count and MCP name")
pruneMCOutput, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("prune", "renderedmachineconfigs", "--count=1", "--pool-name", "master").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Cannot get the rendered config for pool")
logger.Infof(pruneMCOutput)
NewSortedRenderedMCMaster = mcList.GetRenderedMachineConfigForMasterOrFail()
matchString = "dry-run deleting rendered MachineConfig "
if sortedMCListMaster[0].GetName() == mSpecConf {
matchString = "Skip dry-run deleting rendered MachineConfig "
}
o.Expect(pruneMCOutput).To(o.ContainSubstring(matchString+sortedMCListMaster[0].GetName()), "Oldest RenderedMachineConfig is not deleted") // to check old rendered master MC will be getting deleted
o.Expect(NewSortedRenderedMCMaster).To(o.Equal(sortedMCListMaster), "The dry run deleted rendered MC is removed but should exist.")
logger.Infof("OK!\n")
// 3 To check for 'oc adm prune renderedmachineconfigs list' cmd
exutil.By("Get the rendered machineconfigs list")
pruneMCOutput, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("prune", "renderedmachineconfigs", "list").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Cannot get the rendered config list")
logger.Infof(pruneMCOutput)
o.Expect(pruneMCOutput).To(o.And(o.ContainSubstring(wSpecConf), o.ContainSubstring(mSpecConf)), "Error: Deleted in-use rendered machine configs")
for _, mc := range sortedRenderedMCs {
used := "Currently in use: false"
if mc.GetName() == wSpecConf || mc.GetName() == mSpecConf {
used = "Currently in use: true"
}
o.Expect(pruneMCOutput).To(o.MatchRegexp(regexp.QuoteMeta(mc.GetName()) + ".*-- .*" + regexp.QuoteMeta(used) + ".*")) // to check correct rendered MC is in-use or not
}
logger.Infof("OK!\n")
// 4 To check for 'oc adm prune renderedmachineconfigs list --in-use --pool-name master' cmd
exutil.By("To get the in use rendered machineconfigs for each MCP")
pruneMCOutput, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("prune", "renderedmachineconfigs", "list", "--in-use", "--pool-name", "master").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Cannot get the rendered config list")
logger.Infof("%s", mSpecConf)
mStatusConf, err := mMcp.getConfigNameOfStatus()
o.Expect(err).NotTo(o.HaveOccurred())
logger.Infof("%s", mStatusConf)
// to check renderedMC is same as `spec` and `status`
o.Expect(pruneMCOutput).To(o.ContainSubstring("spec: "+mSpecConf), "Value for `spec` is not same as expected")
o.Expect(pruneMCOutput).To(o.ContainSubstring("status: "+mStatusConf), "Value for `status` is not same as expected ")
logger.Infof("%s", pruneMCOutput)
logger.Infof("OK!\n")
// 5 To check for `oc adm prune renderedmachineconfigs --count=1 --pool-name master --confirm` cmd
exutil.By("To delete the rendered machineconfigs based on count and MCP name")
pruneMCOutput, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("prune", "renderedmachineconfigs", "--count=1", "--pool-name", "master", "--confirm").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Cannot get the rendered config list")
NewSortedRenderedMCMaster = mcList.GetRenderedMachineConfigForMasterOrFail()
logger.Infof(pruneMCOutput)
if sortedMCListMaster[0].GetName() == mSpecConf {
matchString = "Skip deleting rendered MachineConfig "
} else {
matchString = "deleting rendered MachineConfig "
for _, newMc := range NewSortedRenderedMCMaster {
o.Expect(newMc.GetName()).NotTo(o.ContainSubstring(sortedMCListMaster[0].GetName()), "Deleted rendered MachineConfig is still present in the new list") // check expected rendered-master MC is been deleted
}
}
o.Expect(pruneMCOutput).To(o.ContainSubstring(matchString+sortedMCListMaster[0].GetName()), "Oldest RenderedMachineConfig is not deleted") // check oldest rendered master MC is been deleted
logger.Infof("OK!\n")
// 6 To check for `oc adm prune renderedmachineconfigs --confirm` cmd
sortedRenderedMCs = mcList.GetMCPRenderedMachineConfigsOrFail() // Get the current list of rendered machine configs
exutil.By("To delete the rendered machineconfigs based on count and MCP name")
pruneMCOutput, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("prune", "renderedmachineconfigs", "--confirm").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Cannot get the rendered config list")
logger.Infof(pruneMCOutput)
for _, mc := range sortedRenderedMCs {
if mc.GetName() == mSpecConf || mc.GetName() == wSpecConf {
matchString = "Skip deleting rendered MachineConfig "
o.Expect(mc.Exists()).To(o.BeTrue(), "Deleted the in-use rendered MC") // check in-use rendered MC is not been deleted
} else {
matchString = "deleting rendered MachineConfig "
o.Expect(mc.Exists()).To(o.BeFalse(), "The expected rendered MC is not deleted") // check expected rendered MC is been deleted
}
o.Expect(pruneMCOutput).To(o.ContainSubstring(matchString+mc.GetName()), "Oldest RenderedMachineConfig is not deleted")
}
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
27e4a744-55a1-4633-aaee-26ae256911c6
|
Author:ptalgulk-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-73155-[P2][OnCLayer] prune renderedmachineconfigs in updating pools[Disruptive]
|
['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:ptalgulk-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-73155-[P2][OnCLayer] prune renderedmachineconfigs in updating pools[Disruptive]", func() {
var (
wMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
mcList = NewMachineConfigList(oc.AsAdmin())
node = wMcp.GetSortedNodesOrFail()[0]
fileMode = "420"
fileContent = "test1"
filePath = "/etc/mco-test-case-73155-"
mcName = "mco-tc-73155-"
)
mcList.SortByTimestamp() // sort by time
wSpecConf, specErr := wMcp.getConfigNameOfSpec() // get the rendered MC name in the worker MCP spec
o.Expect(specErr).NotTo(o.HaveOccurred())
exutil.By("Create new Machine config")
mc := NewMachineConfig(oc.AsAdmin(), mcName+"1", MachineConfigPoolWorker)
fileConfig := getBase64EncodedFileConfig(filePath+"1", fileContent, fileMode)
mc.parameters = []string{fmt.Sprintf("FILES=[%s]", fileConfig)}
mc.skipWaitForMcp = true // do not wait for the MCP to finish updating
defer func() {
exutil.By("Check Machine Config are deleted")
o.Expect(NewRemoteFile(node, filePath+"1")).NotTo(Exist(),
"The file %s should NOT exists", filePath+"1")
o.Expect(NewRemoteFile(node, filePath+"2")).NotTo(Exist(),
"The file %s should NOT exists", filePath+"2")
exutil.By("Check the MCP status is not been degreaded")
wMcp.waitForComplete()
}()
defer mc.delete() // Clean up after creation
mc.create()
logger.Infof("OK\n")
exutil.By("Wait for first nodes to be configured")
o.Eventually(node.IsUpdating, "10m", "20s").Should(o.BeTrue())
o.Eventually(node.IsUpdated, "10m", "20s").Should(o.BeTrue()) // check for first node is updated
initialRenderedMC, specErr := wMcp.getConfigNameOfSpec() // check for new worker rendered MC configured
o.Expect(specErr).NotTo(o.HaveOccurred())
logger.Infof("OK\n")
exutil.By("Create new second Machine configs")
fileConfig = getBase64EncodedFileConfig(filePath+"2", fileContent, fileMode)
mc = NewMachineConfig(oc.AsAdmin(), mcName+"2", MachineConfigPoolWorker)
mc.parameters = []string{fmt.Sprintf("FILES=[%s]", fileConfig)}
mc.skipWaitForMcp = true // do not wait for the MCP to finish updating
defer mc.deleteNoWait() // Clean up after creation
mc.create()
logger.Infof("OK\n")
exutil.By("Run prune command and check new rendered MC is generated with MCP is still updating")
o.Eventually(wMcp.getConfigNameOfSpec, "5m", "20s").ShouldNot(o.Equal(initialRenderedMC), "Second worker renderedMC is not configured yet")
newRenderedMC, specErr := wMcp.getConfigNameOfSpec()
o.Expect(specErr).NotTo(o.HaveOccurred(), "Get desired MC of worker pool failed")
pruneMCOutput, err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("prune", "renderedmachineconfigs", "--pool-name", "worker", "--confirm").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Cannot get the rendered config list")
logger.Infof(pruneMCOutput)
renderedMCs := []string{wSpecConf, initialRenderedMC, newRenderedMC}
// the worker MCP is still updating, so its previously in-use rendered MC and the rendered MCs generated from the 1st and 2nd MCs are all in use; we need to check that none of them are deleted
for _, mc := range renderedMCs {
o.Expect(pruneMCOutput).To(o.ContainSubstring("Skip deleting rendered MachineConfig "+mc), "Deleted the in-use rendered MC: "+mc)
}
logger.Infof("OK\n")
exutil.By("Check no worker MCP is degreaded")
wMcp.waitForComplete()
exutil.By("Execute the prune command again after complete update")
pruneMCOutput, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("prune", "renderedmachineconfigs", "--pool-name", "worker", "--confirm").Output()
o.Expect(err).NotTo(o.HaveOccurred(), "Cannot get the rendered config list")
logger.Infof(pruneMCOutput)
o.Expect(pruneMCOutput).To(o.ContainSubstring("Skip deleting rendered MachineConfig "+newRenderedMC), "Deleted the in-use rendered MC")
logger.Infof("OK\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
e3a21739-8a95-4d99-8e10-7410692c8863
|
Author:ptalgulk-NonHyperShiftHOST-NonPreRelease-Low-74606-[OnCLayer] 'oc adm prune' report failures consistently when using wrong pool name
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:ptalgulk-NonHyperShiftHOST-NonPreRelease-Low-74606-[OnCLayer] 'oc adm prune' report failures consistently when using wrong pool name", func() {
var expectedErrorMsg = "error: MachineConfigPool with name 'fake' not found"
out, err := oc.AsAdmin().Run("adm").Args("prune", "renderedmachineconfigs", "list", "--pool-name", "fake").Output()
o.Expect(err).To(o.HaveOccurred(), "Expected oc command error to fail but it didn't")
o.Expect(err).To(o.BeAssignableToTypeOf(&exutil.ExitError{}), "Unexpected error while executing prune command")
o.Expect(err.(*exutil.ExitError).ExitCode()).ShouldNot(o.Equal(0), "Unexpected return code when executing the prune command with a wrong pool name")
o.Expect(out).To(o.Equal(expectedErrorMsg), "Unexecpted error message when using wrong pool name in the prune command")
out, err = oc.AsAdmin().Run("adm").Args("prune", "renderedmachineconfigs", "list", "--in-use", "--pool-name", "fake").Output()
o.Expect(err).To(o.HaveOccurred(), "Expected oc command error to fail but it didn't")
o.Expect(err).To(o.BeAssignableToTypeOf(&exutil.ExitError{}), "Unexpected error while executing prune command with in-use flag")
o.Expect(err.(*exutil.ExitError).ExitCode()).ShouldNot(o.Equal(0), "Unexpected return code when executing the prune command with the in-use flag and a wrong pool name")
o.Expect(out).To(o.Equal(expectedErrorMsg), "Unexecpted error message when using in-use flag and a wrong pool name in the prune command")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
fd537ff6-8775-4188-a8aa-56666dfad8ad
|
Author:sregidor-DEPRECATED-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-73309-disable ipv6 on worker nodes[Disruptive]
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-DEPRECATED-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-73309-disable ipv6 on worker nodes[Disruptive]", func() {
var (
wMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
mcName = "mco-tc-73309-disable-ipv6"
kernelArg = "ipv6.disable=1"
behaviourValidatorApply = UpdateBehaviourValidator{
Checkers: []Checker{
CommandOutputChecker{
Command: []string{"cat", "/proc/cmdline"},
Matcher: o.ContainSubstring(kernelArg),
ErrorMsg: fmt.Sprintf("The kernel argument to disable ipv6 %s was not properly applied", kernelArg),
Desc: fmt.Sprintf("Check that the kernel argument to disable ipv6 %s was properly applied", kernelArg),
},
},
}
behaviourValidatorRemove = UpdateBehaviourValidator{
Checkers: []Checker{
CommandOutputChecker{
Command: []string{"cat", "/proc/cmdline"},
Matcher: o.Not(o.ContainSubstring(kernelArg)),
ErrorMsg: fmt.Sprintf("The kernel argument to disable ipv6 %s was not properly removed", kernelArg),
Desc: fmt.Sprintf("Check that the kernel argument to disable ipv6 %s was properly removed", kernelArg),
},
},
}
)
if IsCompactOrSNOCluster(oc.AsAdmin()) {
g.Skip("This test case can only be executed in clusters with worker pool. Disable IPV6 is not supported in master nodes")
}
behaviourValidatorApply.Initialize(wMcp, nil)
exutil.By("Create a MC to disable ipv6 in worker pool")
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker)
mc.parameters = []string{fmt.Sprintf(`KERNEL_ARGS=["%s"]`, kernelArg)}
mc.skipWaitForMcp = true
defer mc.delete()
mc.create()
logger.Infof("OK!\n")
// Check that the MC is applied according to the expected behaviour
behaviourValidatorApply.Validate()
behaviourValidatorRemove.Initialize(wMcp, nil)
exutil.By("Delete the MC created to disable ipv6 in worker pool")
mc.deleteNoWait()
logger.Infof("OK!\n")
behaviourValidatorRemove.Validate()
})
| |||||
test case
|
openshift/openshift-tests-private
|
a77ef701-de27-4a5c-a56e-ff5c6003a3fe
|
Author:ptalgulk-NonHyperShiftHOST-NonPreRelease-Longduration-High-74540-[P1][OnCLayer] kubelet does not start after reboot due to dependency issue[Disruptive]
|
['"encoding/json"', '"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:ptalgulk-NonHyperShiftHOST-NonPreRelease-Longduration-High-74540-[P1][OnCLayer] kubelet does not start after reboot due to dependency issue[Disruptive]", func() {
var (
unitEnabled = true
unitName = "hello.service"
filePath = "/etc/systemd/system/default.target.wants/hello.service"
fileContent = "[Unit]\nDescription=A hello world unit\nAfter=network-online.target\nRequires=network-online.target\n[Service]\nType=oneshot\nRemainAfterExit=yes\nExecStart=/usr/bin/echo Hello, World\n[Install]\nWantedBy="
unitConfig = getSingleUnitConfig(unitName, unitEnabled, fileContent+"default.target")
mcName = "tc-74540"
mcp = GetCompactCompatiblePool(oc.AsAdmin())
node = mcp.GetSortedNodesOrFail()[0]
activeString = "Active: active"
inactiveString = "Active: inactive"
)
exutil.By("Create a MC to deploy a unit.")
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker)
mc.parameters = []string{fmt.Sprintf("UNITS=[%s]", unitConfig)}
defer mc.delete()
mc.create()
logger.Infof("OK \n")
exutil.By("Wait until worker MCP has finished the configuration. No machine should be degraded.")
mcp.waitForComplete()
logger.Infof("OK \n")
exutil.By("Verfiy file content is properly applied")
rf := NewRemoteFile(node, filePath)
o.Expect(rf.Read()).To(HaveContent(fileContent + "default.target"))
logger.Infof("OK \n")
exutil.By("Validate that the hello unit service is active")
o.Expect(
node.DebugNodeWithChroot("systemctl", "status", unitName),
).To(o.And(o.ContainSubstring(activeString), o.Not(o.ContainSubstring(inactiveString))),
"%s unit is not active", unitName)
logger.Infof("OK \n")
exutil.By("Update the unit of MC")
o.Expect(
mc.Patch("json", fmt.Sprintf(`[{ "op": "replace", "path": "/spec/config/systemd/units/0/contents", "value": %s}]`, jsonEncode(fileContent+"multi-user.target"))),
).To(o.Succeed(), "Error patching %s with the new WantedBy value", fileContent+"multi-user.target")
logger.Infof("OK \n")
exutil.By("Wait until worker MCP has finished the configuration. No machine should be degraded.")
mcp.waitForComplete()
logger.Infof("OK \n")
exutil.By("To verify that previous file does not exist")
o.Expect(rf).NotTo(Exist())
logger.Infof("OK \n")
exutil.By("To verify that new file does exist")
filePath = "/etc/systemd/system/multi-user.target.wants/hello.service"
rf = NewRemoteFile(node, filePath)
o.Expect(rf.Read()).To(HaveContent(fileContent + "multi-user.target"))
logger.Infof("OK \n")
exutil.By("Validate that the hello unit service is active after update")
o.Expect(
node.DebugNodeWithChroot("systemctl", "status", unitName),
).To(o.And(o.ContainSubstring(activeString), o.Not(o.ContainSubstring(inactiveString))),
"%s unit is not active", unitName)
logger.Infof("OK \n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
44a71835-c795-4388-abf5-9a89e7ba6b1e
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Critical-74608-[P2][OnCLayer] Env file /etc/kubernetes/node.env should not be overwritten after a node restart [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Critical-74608-[P2][OnCLayer] Env file /etc/kubernetes/node.env should not be overwritten after a node restart [Disruptive]", func() {
// /etc/kubernetes/node.env only exists in AWS
skipTestIfSupportedPlatformNotMatched(oc, AWSPlatform)
var (
node = GetCompactCompatiblePool(oc.AsAdmin()).GetSortedNodesOrFail()[0]
rEnvFile = NewRemoteFile(node, "/etc/kubernetes/node.env")
extraLine = "\nDUMMYKEY=DUMMYVAL"
)
exutil.By("Get current node.env content")
o.Expect(rEnvFile.Fetch()).To(o.Succeed(),
"Error getting information about %s", rEnvFile)
initialContent := rEnvFile.GetTextContent()
initialUser := rEnvFile.GetUIDName()
initialGroup := rEnvFile.GetGIDName()
initialPermissions := rEnvFile.GetOctalPermissions()
logger.Infof("Initial content: %s", initialContent)
logger.Infof("OK!\n")
exutil.By("Modify the content of the node.env file")
defer rEnvFile.PushNewTextContent(initialContent)
newContent := initialContent + extraLine
logger.Infof("New content: %s", newContent)
o.Expect(rEnvFile.PushNewTextContent(newContent)).To(o.Succeed(),
"Error writing new content in %s", rEnvFile)
logger.Infof("OK!\n")
exutil.By("Reboot node")
o.Expect(node.Reboot()).To(o.Succeed(),
"Error rebooting %s", node)
logger.Infof("OK!\n")
exutil.By("Check that the content was not changed after the node reboot")
o.Eventually(rEnvFile.Read, "15m", "20s").Should(o.And(
HaveContent(newContent),
HaveOwner(initialUser),
HaveGroup(initialGroup),
HaveOctalPermissions(initialPermissions)),
"The information in %s is not the expected one", rEnvFile)
logger.Infof("OK!\n")
exutil.By("Restore initial content")
o.Eventually(rEnvFile.PushNewTextContent, "5m", "20s").WithArguments(initialContent).Should(o.Succeed(),
"Error writing the initial content in %s", rEnvFile)
o.Expect(node.Reboot()).To(o.Succeed(),
"Error rebooting %s", node)
o.Eventually(rEnvFile.Read, "15m", "20s").Should(o.And(
HaveContent(initialContent),
HaveOwner(initialUser),
HaveGroup(initialGroup),
HaveOctalPermissions(initialPermissions)),
"The inforamtion of %s is not the expected one after restoring the initial content", rEnvFile)
logger.Infof("OK!\n")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
11815e8c-b036-4832-84d8-9cc49377cfb5
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Critical-75258-[OnCLayer] No ordering cycle issues should exist [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Critical-75258-[OnCLayer] No ordering cycle issues should exist [Disruptive]", func() {
exutil.By("Check that there are no ordering cycle problems in the nodes")
for _, node := range exutil.OrFail[[]Node](NewNodeList(oc.AsAdmin()).GetAllLinux()) {
if node.HasTaintEffectOrFail("NoExecute") {
logger.Infof("Skipping node %s since it is tainted with NoExecute and no debug pod can be run in it", node.GetName())
continue
}
logger.Infof("Checking node %s", node.GetName())
// For debugging purposes. We ignore the error here
logMsg, _ := node.DebugNodeWithChroot("sh", "-c", `journalctl -o with-unit | grep "Found ordering cycle" -A 10 || true`)
logger.Infof("Orderging cycle messages: %s", logMsg)
o.Expect(node.DebugNodeWithChroot(`journalctl`, `-o`, `with-unit`)).NotTo(o.ContainSubstring("Found ordering cycle"),
"Ordering cycle problems found in node %s", node.GetName())
}
logger.Infof("OK!\n")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
a78eeae2-db9b-41fe-b0c9-3d1427b476a2
|
Author:sregidor-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-75149-[P1][OnCLayer] Update pool with manually cordoned nodes [Disruptive]
|
['"fmt"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonHyperShiftHOST-Longduration-NonPreRelease-Medium-75149-[P1][OnCLayer] Update pool with manually cordoned nodes [Disruptive]", func() {
SkipIfSNO(oc.AsAdmin())
var (
mcp = GetCompactCompatiblePool(oc.AsAdmin())
mcName = "mco-test-75149"
// to make the test execution faster we will use a password configuration for the automation
passwordHash = "fake-hash"
user = "core"
nodeList = NewNodeList(oc.AsAdmin())
)
if len(mcp.GetNodesOrFail()) < 3 {
logger.Infof("There are less than 3 nodes available in the worker node. Since we need at least 3 nodes we use the master pool for testing")
mcp = NewMachineConfigPool(mcp.GetOC(), MachineConfigPoolMaster)
}
exutil.By("Set the maxUnavailable value to 2")
mcp.SetMaxUnavailable(2)
defer mcp.RemoveMaxUnavailable()
logger.Infof("OK!\n")
exutil.By("Manually cordon one of the nodes")
nodes := mcp.GetNodesOrFail()
cordonedNode := nodes[0]
defer cordonedNode.Uncordon()
o.Expect(cordonedNode.Cordon()).To(o.Succeed(),
"Could not cordon node %s", cordonedNode.GetName())
logger.Infof("OK!\n")
exutil.By("Create a new MachineConfiguration resource")
mc := NewMachineConfig(oc.AsAdmin(), mcName, mcp.GetName())
mc.parameters = []string{fmt.Sprintf(`PWDUSERS=[{"name":"%s", "passwordHash": "%s" }]`, user, passwordHash)}
mc.skipWaitForMcp = true
defer mc.delete()
defer cordonedNode.Uncordon()
mc.create()
logger.Infof("OK!\n")
exutil.By("Check that only one node is updated at a time (instead of 2) because the manually cordoned node counts as unavailable")
// get all nodes with status != Done
nodeList.SetItemsFilter(`?(@.metadata.annotations.machineconfiguration\.openshift\.io/state!="Done")`)
o.Consistently(func() (int, error) {
nodes, err := nodeList.GetAll()
return len(nodes), err
}, "3m", "10s").Should(o.BeNumerically("<", 2),
"The maximun number of nodes updated at a time should be 1, because the manually cordoned node should count as unavailable too")
logger.Infof("OK!\n")
exutil.By("Check that all nodes are updated but the manually cordoned one")
numNodes := len(nodes)
waitDuration := mcp.estimateWaitDuration().String()
o.Eventually(mcp.getUpdatedMachineCount, waitDuration, "15s").Should(o.Equal(numNodes-1),
"All nodes but one should be udated. %d total nodes, expecting %d to be updated", numNodes, numNodes-1)
// We check that the desired config for the manually cordoned node is the old config, and not the new one
o.Consistently(cordonedNode.GetDesiredMachineConfig, "2m", "20s").Should(o.Equal(mcp.getConfigNameOfStatusOrFail()),
"The manually cordoned node should not be updated. The desiredConfig value should be the old one.")
logger.Infof("OK!\n")
exutil.By("Manually undordon the cordoned node")
o.Expect(cordonedNode.Uncordon()).To(o.Succeed(),
"Could not uncordon the manually cordoned node")
logger.Infof("OK!\n")
exutil.By("All nodes should be updated now")
mcp.waitForComplete()
// Make sure that the cordoned node is now using the new configuration
o.Eventually(cordonedNode.GetDesiredMachineConfig, "30s", "10s").Should(o.Equal(mcp.getConfigNameOfSpecOrFail()),
"The manually cordoned node should not be updated. The desiredConfig value should be the old one.")
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
51b54bd9-bd0a-476f-a018-fec3b83b21ed
|
Author:sregidor-NonHyperShiftHOST-Longduration-NonPreRelease-Critical-76108-[P2] MachineConfig inheritance. Canary rollout update [Disruptive]
|
['"encoding/json"', '"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:sregidor-NonHyperShiftHOST-Longduration-NonPreRelease-Critical-76108-[P2] MachineConfig inheritance. Canary rollout update [Disruptive]", func() {
SkipIfCompactOrSNO(oc) // We can't create custom pools if only the master pool exists
var (
customMCPName = "worker-perf"
canaryMCPName = "worker-perf-canary"
mcName = "06-kdump-enable-worker-perf-tc-76108"
mcUnit = `{"enabled": true, "name": "kdump.service"}`
mcKernelArgs = "crashkernel=512M"
mc = NewMachineConfig(oc.AsAdmin(), mcName, customMCPName)
)
defer mc.deleteNoWait()
exutil.By("Create custom MCP")
defer DeleteCustomMCP(oc.AsAdmin(), customMCPName)
customMcp, err := CreateCustomMCP(oc.AsAdmin(), customMCPName, 2)
o.Expect(err).NotTo(o.HaveOccurred(), "Could not create a new custom MCP")
logger.Infof("OK!\n")
exutil.By("Create canary custom MCP")
defer DeleteCustomMCP(oc.AsAdmin(), canaryMCPName)
canaryMcp, err := CreateCustomMCP(oc.AsAdmin(), canaryMCPName, 0)
o.Expect(err).NotTo(o.HaveOccurred(), "Could not create a new custom MCP")
logger.Infof("OK!\n")
exutil.By("Patch the canary MCP so that it uses the MCs of the custom MCP too")
o.Expect(
canaryMcp.Patch("json", `[{ "op": "add", "path": "/spec/machineConfigSelector/matchExpressions/0/values/-", "value":"`+customMCPName+`"}]`),
).To(o.Succeed(), "Error patching MCP %s so that it uses the same MCs as MCP %s", canaryMcp.GetName(), customMcp.GetName())
logger.Infof("OK!\n")
exutil.By("Apply a new MC to the custom pool")
err = mc.Create("-p", "NAME="+mcName, "-p", "POOL="+customMCPName, "-p", fmt.Sprintf("UNITS=[%s]", mcUnit), fmt.Sprintf(`KERNEL_ARGS=["%s"]`, mcKernelArgs))
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating MachineConfig %s", mc.GetName())
customMcp.waitForComplete()
exutil.By("Check that the configuration was applied the nodes")
canaryNode := customMcp.GetNodesOrFail()[0]
o.Expect(canaryNode.IsKernelArgEnabled(mcKernelArgs)).Should(o.BeTrue(), "Kernel argument %s is not set in node %s", mcKernelArgs, canaryNode)
logger.Infof("OK!\n")
exutil.By("Move one node from the custom pool to the canary custom pool")
startTime := canaryNode.GetDateOrFail()
o.Expect(
canaryNode.AddLabel("node-role.kubernetes.io/"+canaryMCPName, ""),
).To(o.Succeed(), "Error labeling node %s", canaryNode)
o.Expect(
canaryNode.RemoveLabel("node-role.kubernetes.io/"+customMCPName),
).To(o.Succeed(), "Error removing label from node %s", canaryNode)
o.Eventually(canaryMcp.getMachineCount, "5m", "20s").Should(o.Equal(1),
"A machine should be added to the canary MCP, but no machine was added: %s", canaryMcp.PrettyString())
o.Eventually(customMcp.getMachineCount, "5m", "20s").Should(o.Equal(1),
"A machine should be removed from the custom MCP: %s", customMcp.PrettyString())
canaryMcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Check that the configuration is still applied to the canary node")
o.Expect(canaryNode.IsKernelArgEnabled(mcKernelArgs)).Should(o.BeTrue(), "Kernel argument %s is not set in node %s", mcKernelArgs, canaryNode)
logger.Infof("OK!\n")
exutil.By("Check that the node was not restarted when it was added to the canary pool")
checkRebootAction(false, canaryNode, startTime)
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
8a94b60d-9e5b-4c48-88f7-8a2c6cfbef3c
|
Author:ptalgulk-NonHyperShiftHOST-NonPreRelease-Medium-68683-[OnCLayer] nodelogs feature works fine [Disruptive]
|
['"fmt"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco.go
|
g.It("Author:ptalgulk-NonHyperShiftHOST-NonPreRelease-Medium-68683-[OnCLayer] nodelogs feature works fine [Disruptive]", func() {
var (
wMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
workerNode = wMcp.GetNodesOrFail()[0]
mMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)
masterNode = mMcp.GetNodesOrFail()[0]
)
verifyCmd := func(node Node) {
exutil.By(fmt.Sprintf("Check that the node-logs cmd work for %s node", node.name))
nodeLogs, err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("node-logs", node.name, "--tail=20").Output()
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Cannot get the node-logs cmd for %s node", node.name))
o.Expect(len(strings.Split(nodeLogs, "\n"))).To(o.BeNumerically(">=", 5)) // check the logs line are greater than 5
}
verifyCmd(workerNode)
verifyCmd(masterNode)
})
| |||||
test
|
openshift/openshift-tests-private
|
3f7f3d68-bdd8-4fe7-b358-a367d62296a6
|
mco_alerts
|
import (
"fmt"
"regexp"
"time"
"github.com/onsi/gomega/types"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
)
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_alerts.go
|
package mco
import (
"fmt"
"regexp"
"time"
"github.com/onsi/gomega/types"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
)
var _ = g.Describe("[sig-mco] MCO alerts", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("mco-alerts", exutil.KubeConfigPath())
// CoreOs compatible MachineConfigPool (if worker pool has CoreOs nodes, then it is worker pool, else it is master pool because master nodes are always CoreOs)
coMcp *MachineConfigPool
// Compact compatible MCP. If the node is compact/SNO this variable will be the master pool, else it will be the worker pool
mcp *MachineConfigPool
// master MCP
mMcp *MachineConfigPool
)
g.JustBeforeEach(func() {
coMcp = GetCoreOsCompatiblePool(oc.AsAdmin())
mcp = GetCompactCompatiblePool(oc.AsAdmin())
mMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)
preChecks(oc)
})
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-63865-[P1] MCDRebootError alert[Disruptive]", func() {
var (
mcName = "mco-tc-63865-reboot-alert"
filePath = "/etc/mco-tc-63865-test.test"
fileContent = "test"
fileMode = 420 // decimal 0644
expectedAlertName = "MCDRebootError"
expectedAlertSeverity = "critical"
alertFiredAfter = 5 * time.Minute
alertStillPresentAfter = 10 * time.Minute
)
exutil.By("Break the reboot process in a node")
node := mcp.GetSortedNodesOrFail()[0]
defer func() {
_ = FixRebootInNode(&node)
mcp.WaitForUpdatedStatus()
}()
o.Expect(BreakRebootInNode(&node)).To(o.Succeed(),
"Error breaking the reboot process in node %s", node.GetName())
logger.Infof("OK!\n")
exutil.By("Create a MC to force a reboot")
file := ign32File{
Path: filePath,
Contents: ign32Contents{
Source: GetBase64EncodedFileSourceContent(fileContent),
},
Mode: PtrInt(fileMode),
}
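// deploying this file through a MachineConfig triggers a node reboot, which is expected to fail because the reboot command was broken above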
mc := NewMachineConfig(oc.AsAdmin(), mcName, mcp.GetName())
mc.skipWaitForMcp = true
defer mc.deleteNoWait()
mc.parameters = []string{fmt.Sprintf("FILES=[%s]", MarshalOrFail(file))}
mc.create()
logger.Infof("OK!\n")
// Check that the expected alert is fired with the right values
expectedDegradedMessage := fmt.Sprintf(`Node %s is reporting: "reboot command failed, something is seriously wrong"`,
node.GetName())
expectedAlertLabels := expectedAlertValues{"severity": o.Equal(expectedAlertSeverity)}
expectedAlertAnnotationDescription := fmt.Sprintf("Reboot failed on %s , update may be blocked. For more details: oc logs -f -n openshift-machine-config-operator machine-config-daemon",
node.GetName())
expectedAlertAnnotationSummary := "Alerts the user that a node failed to reboot one or more times over a span of 5 minutes."
expectedAlertAnnotations := expectedAlertValues{
"description": o.ContainSubstring(expectedAlertAnnotationDescription),
"summary": o.Equal(expectedAlertAnnotationSummary),
}
params := checkFiredAlertParams{
expectedAlertName: expectedAlertName,
expectedDegradedMessage: regexp.QuoteMeta(expectedDegradedMessage),
expectedAlertLabels: expectedAlertLabels,
expectedAlertAnnotations: expectedAlertAnnotations,
pendingDuration: alertFiredAfter,
// Because of OCPBUGS-5497, we need to check that the alert is already present after 15 minutes.
// We have waited 5 minutes to test the "firing" state, so we only have to wait 10 minutes more to test the 15 minutes needed since OCPBUGS-5497
stillPresentDuration: alertStillPresentAfter,
}
checkFiredAlert(oc, mcp, params)
exutil.By("Fix the reboot process in the node")
o.Expect(FixRebootInNode(&node)).To(o.Succeed(),
"Error fixing the reboot process in node %s", node.GetName())
logger.Infof("OK!\n")
checkFixedAlert(oc, mcp, expectedAlertName)
})
g.It("Author:sregidor-VMonly-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-63866- [P2] MCDPivotError alert[Disruptive]", func() {
var (
mcName = "mco-tc-63866-pivot-alert"
expectedAlertName = "MCDPivotError"
alertFiredAfter = 2 * time.Minute
dockerFileCommands = `RUN echo 'Hello world' > /etc/hello-world-file`
expectedAlertSeverity = "warning"
)
// We use the CoreOs-compatible MCP so that we make sure that we are using a CoreOs node
exutil.By("Break the rpm-ostree rebase process in a node")
// We sort the CoreOs node list to make sure that we break the first node to be updated, which makes the test faster
node := sortNodeList(coMcp.GetCoreOsNodesOrFail())[0]
defer func() {
_ = FixRebaseInNode(&node)
coMcp.WaitForUpdatedStatus()
}()
o.Expect(BreakRebaseInNode(&node)).To(o.Succeed(),
"Error breaking the rpm-ostree rebase process in node %s", node.GetName())
logger.Infof("OK!\n")
// Build a new osImage that we will use to force a rebase in the broken node
exutil.By("Build new OSImage")
osImageBuilder := OsImageBuilderInNode{node: node, dockerFileCommands: dockerFileCommands}
digestedImage, err := osImageBuilder.CreateAndDigestOsImage()
o.Expect(err).NotTo(o.HaveOccurred(),
"Error creating the new osImage")
logger.Infof("OK\n")
// Create MC to force the rebase operation
exutil.By("Create a MC to deploy the new osImage")
mc := NewMachineConfig(oc.AsAdmin(), mcName, coMcp.GetName())
mc.parameters = []string{"OS_IMAGE=" + digestedImage}
mc.skipWaitForMcp = true
defer mc.deleteNoWait()
mc.create()
logger.Infof("OK\n")
// Check that the expected alert is fired with the right values
expectedDegradedMessage := fmt.Sprintf(`Node %s is reporting: "failed to update OS to %s`,
node.GetName(), digestedImage)
expectedAlertLabels := expectedAlertValues{"severity": o.Equal(expectedAlertSeverity)}
expectedAlertAnnotationDescription := fmt.Sprintf("Error detected in pivot logs on %s , upgrade may be blocked. For more details: oc logs -f -n openshift-machine-config-operator machine-config-daemon-",
node.GetName())
expectedAlertAnnotationSummary := "Alerts the user when an error is detected upon pivot. This triggers if the pivot errors are above zero for 2 minutes."
expectedAlertAnnotations := expectedAlertValues{
"description": o.ContainSubstring(expectedAlertAnnotationDescription),
"summary": o.Equal(expectedAlertAnnotationSummary),
}
params := checkFiredAlertParams{
expectedAlertName: expectedAlertName,
expectedDegradedMessage: regexp.QuoteMeta(expectedDegradedMessage),
expectedAlertLabels: expectedAlertLabels,
expectedAlertAnnotations: expectedAlertAnnotations,
pendingDuration: alertFiredAfter,
stillPresentDuration: 0, // We skip this validation to make the test faster
}
checkFiredAlert(oc, coMcp, params)
exutil.By("Fix the rpm-ostree rebase process in the node")
o.Expect(FixRebaseInNode(&node)).To(o.Succeed(),
"Error fixing the rpm-ostree rebase process in node %s", node.GetName())
logger.Infof("OK!\n")
checkFixedAlert(oc, coMcp, expectedAlertName)
})
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-62075-[OnCLayer] MCCPoolAlert. Test support for a node pool hierarchy [Disruptive]", func() {
var (
iMcpName = "infra"
expectedAlertName = "MCCPoolAlert"
expectedAlertSeverity = "warning"
masterNode = mMcp.GetNodesOrFail()[0]
mcc = NewController(oc.AsAdmin())
)
numMasterNodes, err := mMcp.getMachineCount()
o.Expect(err).NotTo(o.HaveOccurred(), "Cannot get the machinecount field in % MCP", mMcp.GetName())
exutil.By("Add label as infra to the existing master node")
infraLabel := "node-role.kubernetes.io/infra"
defer func() {
// ignore the output and focus on error handling; if an error occurs, fail this case
_, deletefailure := masterNode.DeleteLabel(infraLabel)
o.Expect(deletefailure).NotTo(o.HaveOccurred())
}()
err = masterNode.AddLabel(infraLabel, "")
o.Expect(err).NotTo(o.HaveOccurred(),
"Could not add the label %s to node %s", infraLabel, masterNode)
logger.Infof("OK!\n")
exutil.By("Create custom infra mcp")
iMcpTemplate := generateTemplateAbsolutePath("custom-machine-config-pool.yaml")
iMcp := NewMachineConfigPool(oc.AsAdmin(), iMcpName)
iMcp.template = iMcpTemplate
// We need to wait for the label to be deleted before removing the MCP. Otherwise the worker pool
// becomes Degraded.
defer func() {
_, deletefailure := masterNode.DeleteLabel(infraLabel)
// We don't fail if there is a problem because we need to delete the infra MCP
// We will try to remove the label again in the next defer section
if deletefailure != nil {
logger.Errorf("Error deleting label '%s' in node '%s'", infraLabel, masterNode.GetName())
}
_ = masterNode.WaitForLabelRemoved(infraLabel)
iMcp.delete()
}()
iMcp.create()
logger.Infof("OK!\n")
exutil.By("Check that the controller logs are reporting the conflict")
o.Eventually(
mcc.GetLogs, "5m", "10s",
).Should(o.ContainSubstring("Found master node that matches selector for custom pool %s, defaulting to master. This node will not have any custom role configuration as a result. Please review the node to make sure this is intended", iMcpName),
"The MCO controller is not reporting a machine config pool conflict in the logs")
logger.Infof("OK!\n")
exutil.By(`Check that the master node remains in master pool and is moved to "infra" pool or simply removed from master pool`)
o.Consistently(mMcp.getMachineCount, "30s", "10s").Should(o.Equal(numMasterNodes),
"The number of machines in the MCP has changed!\n%s", mMcp.PrettyString())
o.Consistently(iMcp.getMachineCount, "30s", "10s").Should(o.Equal(0),
"No node should be added to the custom pool!\n%s", iMcp.PrettyString())
logger.Infof("OK!\n")
// Check that the expected alert is fired with the right values
exutil.By(`Check that the right alert was triggered`)
expectedAlertLabels := expectedAlertValues{"severity": o.Equal(expectedAlertSeverity)}
expectedAlertAnnotationDescription := fmt.Sprintf("Node .* has triggered a pool alert due to a label change")
expectedAlertAnnotationSummary := "Triggers when nodes in a pool have overlapping labels such as master, worker, and a custom label therefore a choice must be made as to which is honored."
expectedAlertAnnotations := expectedAlertValues{
"description": o.MatchRegexp(expectedAlertAnnotationDescription),
"summary": o.Equal(expectedAlertAnnotationSummary),
}
params := checkFiredAlertParams{
expectedAlertName: expectedAlertName,
expectedAlertLabels: expectedAlertLabels,
expectedAlertAnnotations: expectedAlertAnnotations,
pendingDuration: 0,
stillPresentDuration: 0, // We skip this validation to make the test faster
}
checkFiredAlert(oc, nil, params)
logger.Infof("OK!\n")
exutil.By("Remove the label from the master node in order to fix the problem")
_, err = masterNode.DeleteLabel(infraLabel)
o.Expect(err).NotTo(o.HaveOccurred(),
"Could not delete the %s label in node %s", infraLabel, masterNode)
o.Expect(
masterNode.WaitForLabelRemoved(infraLabel),
).To(o.Succeed(),
"The label %s was not removed from node %s", infraLabel, masterNode)
logger.Infof("OK!\n")
exutil.By("Check that the alert is not triggered anymore")
checkFixedAlert(oc, coMcp, expectedAlertName)
logger.Infof("OK!\n")
})
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Medium-73841-[P1][OnCLayer] KubeletHealthState alert [Disruptive]", func() {
var (
node = mcp.GetSortedNodesOrFail()[0]
fixed = false
expectedAlertName = "KubeletHealthState"
expectedAlertSeverity = "warning"
expectedAlertAnnotationDescription = "Kubelet health failure threshold reached"
expectedAlertAnnotationSummary = "This keeps track of Kubelet health failures, and tallies them. The warning is triggered if 2 or more failures occur."
)
exutil.By("Break kubelet")
// We stop the kubelet service to break the node and after 5 minutes we start it again to fix the node
go func() {
defer g.GinkgoRecover()
_, err := node.DebugNodeWithChroot("sh", "-c", "systemctl stop kubelet.service; sleep 300; systemctl start kubelet.service")
o.Expect(err).NotTo(o.HaveOccurred(),
"Error stopping and restarting kubelet in %s", node)
logger.Infof("Kubelet service has been restarted again")
fixed = true
}()
logger.Infof("OK!\n")
expectedAlertLabels := expectedAlertValues{
"severity": o.Equal(expectedAlertSeverity),
}
expectedAlertAnnotations := expectedAlertValues{
"description": o.MatchRegexp(expectedAlertAnnotationDescription),
"summary": o.Equal(expectedAlertAnnotationSummary),
}
params := checkFiredAlertParams{
expectedAlertName: expectedAlertName,
expectedAlertLabels: expectedAlertLabels,
expectedAlertAnnotations: expectedAlertAnnotations,
pendingDuration: 0,
stillPresentDuration: 0, // We skip this validation to make the test faster
}
checkFiredAlert(oc, nil, params)
exutil.By("Wait for the kubelet service to be restarted")
o.Eventually(func() bool { return fixed }, "5m", "20s").Should(o.BeTrue(), "Kubelet service was not restarted")
o.Eventually(&node).Should(HaveConditionField("Ready", "status", TrueString), "Node %s didn't become ready after kubelet was restarted", node)
logger.Infof("OK!\n")
exutil.By("Check that the alert is not triggered anymore")
checkFixedAlert(oc, mcp, expectedAlertName)
logger.Infof("OK!\n")
})
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Medium-75862-[P2][OnCLayer] Add alert for users of deprecating the Image Registry workaround [Disruptive]", func() {
var (
expectedAlertName = "MCODrainOverrideConfigMapAlert"
expectedAlertSeverity = "warning"
expectedAlertAnnotationDescription = "Image Registry Drain Override configmap has been detected. Please use the Node Disruption Policy feature to control the cluster's drain behavior as the configmap method is currently deprecated and will be removed in a future release."
expectedAlertAnnotationSummary = "Alerts the user to the presence of a drain override configmap that is being deprecated and removed in a future release."
overrideCMName = "image-registry-override-drain"
)
exutil.By("Create an image-registry-override-drain configmap")
defer oc.AsAdmin().Run("delete").Args("configmap", "-n", MachineConfigNamespace, overrideCMName).Execute()
o.Expect(
oc.AsAdmin().Run("create").Args("configmap", "-n", MachineConfigNamespace, overrideCMName).Execute(),
).To(o.Succeed(), "Error creating the image-registry override configmap")
logger.Infof("OK!\n")
expectedAlertLabels := expectedAlertValues{
"severity": o.Equal(expectedAlertSeverity),
}
expectedAlertAnnotations := expectedAlertValues{
"description": o.MatchRegexp(expectedAlertAnnotationDescription),
"summary": o.Equal(expectedAlertAnnotationSummary),
}
params := checkFiredAlertParams{
expectedAlertName: expectedAlertName,
expectedAlertLabels: expectedAlertLabels,
expectedAlertAnnotations: expectedAlertAnnotations,
pendingDuration: 0,
stillPresentDuration: 0, // We skip this validation to make the test faster
}
checkFiredAlert(oc, nil, params)
exutil.By("Delete the image-registry-override-drain configmap")
o.Expect(
oc.AsAdmin().Run("delete").Args("configmap", "-n", MachineConfigNamespace, overrideCMName).Execute(),
).To(o.Succeed(), "Error deleting the image-registry override configmap")
logger.Infof("OK!\n")
exutil.By("Check that the alert is not triggered anymore")
checkFixedAlert(oc, mcp, expectedAlertName)
logger.Infof("OK!\n")
})
})
type expectedAlertValues map[string]types.GomegaMatcher
type checkFiredAlertParams struct {
expectedAlertLabels expectedAlertValues
expectedAlertAnnotations expectedAlertValues
// regexp that should match the MCP degraded message
expectedDegradedMessage string
expectedAlertName string
pendingDuration time.Duration
stillPresentDuration time.Duration
}
func checkFiredAlert(oc *exutil.CLI, mcp *MachineConfigPool, params checkFiredAlertParams) {
if mcp != nil {
exutil.By("Wait for MCP to be degraded")
o.Eventually(mcp,
"15m", "30s").Should(BeDegraded(),
"The %s MCP should be degraded when the reboot process is broken. But it didn't.", mcp.GetName())
logger.Infof("OK!\n")
exutil.By("Verify that the pool reports the right error message")
o.Expect(mcp).To(HaveNodeDegradedMessage(o.MatchRegexp(params.expectedDegradedMessage)),
"The %s MCP is not reporting the right error message", mcp.GetName())
logger.Infof("OK!\n")
}
exutil.By("Verify that the alert is triggered")
var alertJSON []JSONData
var alertErr error
o.Eventually(func() ([]JSONData, error) {
alertJSON, alertErr = getAlertsByName(oc, params.expectedAlertName)
return alertJSON, alertErr
}, "5m", "20s").Should(o.HaveLen(1),
"Expected 1 %s alert and only 1 to be triggered!", params.expectedAlertName)
logger.Infof("Found %s alerts: %s", params.expectedAlertName, alertJSON)
alertMap := alertJSON[0].ToMap()
annotationsMap := alertJSON[0].Get("annotations").ToMap()
logger.Infof("OK!\n")
if params.expectedAlertAnnotations != nil {
exutil.By("Verify alert's annotations")
// Check all expected annotations
for annotation, expectedMatcher := range params.expectedAlertAnnotations {
logger.Infof("Verifying annotation: %s", annotation)
o.Expect(annotationsMap).To(o.HaveKeyWithValue(annotation, expectedMatcher),
"The alert is reporting a wrong '%s' annotation value", annotation)
}
logger.Infof("OK!\n")
} else {
logger.Infof("No annotations checks needed!")
}
exutil.By("Verify alert's labels")
labelsMap := alertJSON[0].Get("labels").ToMap()
// Since OCPBUGS-904 we need to check that the namespace is reported properly in all the alerts
o.Expect(labelsMap).To(o.HaveKeyWithValue("namespace", MachineConfigNamespace),
"Expected the alert to report the MCO namespace")
if params.expectedAlertLabels != nil {
// Check all expected labels
for label, expectedMatcher := range params.expectedAlertLabels {
logger.Infof("Verifying label: %s", label)
o.Expect(labelsMap).To(o.HaveKeyWithValue(label, expectedMatcher),
"The alert is reporting a wrong '%s' label value", label)
}
} else {
logger.Infof("No extra labels checks needed!")
}
logger.Infof("OK!\n")
if params.pendingDuration != 0 {
exutil.By("Verify that the alert is pending")
o.Expect(alertMap).To(o.HaveKeyWithValue("state", "pending"),
"Expected the alert's state to be 'pending', but it is not.")
logger.Infof("OK!\n")
}
exutil.By("Verify that the alert is in firing state")
if params.pendingDuration != 0 {
logger.Infof("Wait %s minutes until the alert is fired", params.pendingDuration)
time.Sleep(params.pendingDuration)
}
logger.Infof("Checking alert's state")
o.Eventually(func() ([]JSONData, error) {
alertJSON, alertErr = getAlertsByName(oc, params.expectedAlertName)
return alertJSON, alertErr
}, "5m", "20s").Should(o.HaveLen(1),
"Expected 1 %s alert and only 1 to be triggered!", params.expectedAlertName)
logger.Infof("Found %s alerts: %s", params.expectedAlertName, alertJSON)
alertMap = alertJSON[0].ToMap()
o.Expect(alertMap).To(o.HaveKeyWithValue("state", "firing"),
"Expected the alert to report 'firing' state")
logger.Infof("OK!\n")
if params.stillPresentDuration.Minutes() != 0 {
exutil.By(fmt.Sprintf("Verfiy that the alert is not removed after %s", params.stillPresentDuration))
o.Consistently(getAlertsByName, params.stillPresentDuration, params.stillPresentDuration/3).WithArguments(oc, params.expectedAlertName).
Should(o.HaveLen(1),
"Expected %s alert to be present, but the alert was removed for no reason!", params.expectedAlertName)
logger.Infof("OK!\n")
}
}
func checkFixedAlert(oc *exutil.CLI, mcp *MachineConfigPool, expectedAlertName string) {
exutil.By("Verfiy that the pool stops being degraded")
o.Eventually(mcp,
"10m", "30s").ShouldNot(BeDegraded(),
"After fixing the reboot process the %s MCP should stop being degraded", mcp.GetName())
logger.Infof("OK!\n")
exutil.By("Verfiy that the alert is not triggered anymore")
o.Eventually(getAlertsByName, "5m", "20s").WithArguments(oc, expectedAlertName).
Should(o.HaveLen(0),
"Expected %s alert to be removed after the problem is fixed!", expectedAlertName)
logger.Infof("OK!\n")
}
|
package mco
| ||||
function
|
openshift/openshift-tests-private
|
75e832dc-27d8-4da7-92dc-b929dbdcb216
|
checkFiredAlert
|
['"fmt"', '"time"']
|
['checkFiredAlertParams']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_alerts.go
|
func checkFiredAlert(oc *exutil.CLI, mcp *MachineConfigPool, params checkFiredAlertParams) {
if mcp != nil {
exutil.By("Wait for MCP to be degraded")
o.Eventually(mcp,
"15m", "30s").Should(BeDegraded(),
"The %s MCP should be degraded when the reboot process is broken. But it didn't.", mcp.GetName())
logger.Infof("OK!\n")
exutil.By("Verify that the pool reports the right error message")
o.Expect(mcp).To(HaveNodeDegradedMessage(o.MatchRegexp(params.expectedDegradedMessage)),
"The %s MCP is not reporting the right error message", mcp.GetName())
logger.Infof("OK!\n")
}
exutil.By("Verify that the alert is triggered")
var alertJSON []JSONData
var alertErr error
o.Eventually(func() ([]JSONData, error) {
alertJSON, alertErr = getAlertsByName(oc, params.expectedAlertName)
return alertJSON, alertErr
}, "5m", "20s").Should(o.HaveLen(1),
"Expected 1 %s alert and only 1 to be triggered!", params.expectedAlertName)
logger.Infof("Found %s alerts: %s", params.expectedAlertName, alertJSON)
alertMap := alertJSON[0].ToMap()
annotationsMap := alertJSON[0].Get("annotations").ToMap()
logger.Infof("OK!\n")
if params.expectedAlertAnnotations != nil {
exutil.By("Verify alert's annotations")
// Check all expected annotations
for annotation, expectedMatcher := range params.expectedAlertAnnotations {
logger.Infof("Verifying annotation: %s", annotation)
o.Expect(annotationsMap).To(o.HaveKeyWithValue(annotation, expectedMatcher),
"The alert is reporting a wrong '%s' annotation value", annotation)
}
logger.Infof("OK!\n")
} else {
logger.Infof("No annotations checks needed!")
}
exutil.By("Verify alert's labels")
labelsMap := alertJSON[0].Get("labels").ToMap()
// Since OCPBUGS-904 we need to check that the namespace is reported properly in all the alerts
o.Expect(labelsMap).To(o.HaveKeyWithValue("namespace", MachineConfigNamespace),
"Expected the alert to report the MCO namespace")
if params.expectedAlertLabels != nil {
// Check all expected labels
for label, expectedMatcher := range params.expectedAlertLabels {
logger.Infof("Verifying label: %s", label)
o.Expect(labelsMap).To(o.HaveKeyWithValue(label, expectedMatcher),
"The alert is reporting a wrong '%s' label value", label)
}
} else {
logger.Infof("No extra labels checks needed!")
}
logger.Infof("OK!\n")
if params.pendingDuration != 0 {
exutil.By("Verify that the alert is pending")
o.Expect(alertMap).To(o.HaveKeyWithValue("state", "pending"),
"Expected the alert's state to be 'pending', but it is not.")
logger.Infof("OK!\n")
}
exutil.By("Verify that the alert is in firing state")
if params.pendingDuration != 0 {
logger.Infof("Wait %s minutes until the alert is fired", params.pendingDuration)
time.Sleep(params.pendingDuration)
}
logger.Infof("Checking alert's state")
o.Eventually(func() ([]JSONData, error) {
alertJSON, alertErr = getAlertsByName(oc, params.expectedAlertName)
return alertJSON, alertErr
}, "5m", "20s").Should(o.HaveLen(1),
"Expected 1 %s alert and only 1 to be triggered!", params.expectedAlertName)
logger.Infof("Found %s alerts: %s", params.expectedAlertName, alertJSON)
alertMap = alertJSON[0].ToMap()
o.Expect(alertMap).To(o.HaveKeyWithValue("state", "firing"),
"Expected the alert to report 'firing' state")
logger.Infof("OK!\n")
if params.stillPresentDuration.Minutes() != 0 {
exutil.By(fmt.Sprintf("Verfiy that the alert is not removed after %s", params.stillPresentDuration))
o.Consistently(getAlertsByName, params.stillPresentDuration, params.stillPresentDuration/3).WithArguments(oc, params.expectedAlertName).
Should(o.HaveLen(1),
"Expected %s alert to be present, but the alert was removed for no reason!", params.expectedAlertName)
logger.Infof("OK!\n")
}
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
037ac699-9c07-48db-81c5-b5943355409e
|
checkFixedAlert
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_alerts.go
|
func checkFixedAlert(oc *exutil.CLI, mcp *MachineConfigPool, expectedAlertName string) {
exutil.By("Verfiy that the pool stops being degraded")
o.Eventually(mcp,
"10m", "30s").ShouldNot(BeDegraded(),
"After fixing the reboot process the %s MCP should stop being degraded", mcp.GetName())
logger.Infof("OK!\n")
exutil.By("Verfiy that the alert is not triggered anymore")
o.Eventually(getAlertsByName, "5m", "20s").WithArguments(oc, expectedAlertName).
Should(o.HaveLen(0),
"Expected %s alert to be removed after the problem is fixed!", expectedAlertName)
logger.Infof("OK!\n")
}
|
mco
| |||||
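The two helpers above rely on Gomega's asynchronous assertions: Eventually polls a function until the matcher passes, Consistently requires the matcher to keep passing for a whole window, and WithArguments forwards extra parameters to the polled function. Below is a minimal, self-contained sketch of that polling pattern; fetchAlerts is a hypothetical stand-in for a helper such as getAlertsByName, not the real implementation.
package mcoexample

import (
	"testing"

	o "github.com/onsi/gomega"
)

// fetchAlerts is a hypothetical stand-in for a helper such as getAlertsByName:
// it returns the currently firing alerts that match the given name.
func fetchAlerts(name string) ([]string, error) {
	// A real helper would query the monitoring API here.
	return []string{}, nil
}

func TestAlertIsCleared(t *testing.T) {
	g := o.NewWithT(t)

	// Poll fetchAlerts("MCDRebootError") every 20s, for up to 5m, until it returns no alerts.
	g.Eventually(fetchAlerts, "5m", "20s").WithArguments("MCDRebootError").
		Should(o.HaveLen(0), "expected the alert to be cleared")

	// Require the condition to keep holding for 30s, re-checking every 5s.
	g.Consistently(fetchAlerts, "30s", "5s").WithArguments("MCDRebootError").
		Should(o.HaveLen(0), "the alert reappeared")
}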
test case
|
openshift/openshift-tests-private
|
bea220f4-ba05-4c32-b82e-c2105008dcd4
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-63865-[P1] MCDRebootError alert[Disruptive]
|
['"fmt"', '"regexp"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_alerts.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-63865-[P1] MCDRebootError alert[Disruptive]", func() {
var (
mcName = "mco-tc-63865-reboot-alert"
filePath = "/etc/mco-tc-63865-test.test"
fileContent = "test"
fileMode = 420 // decimal 0644
expectedAlertName = "MCDRebootError"
expectedAlertSeverity = "critical"
alertFiredAfter = 5 * time.Minute
alertStillPresentAfter = 10 * time.Minute
)
exutil.By("Break the reboot process in a node")
node := mcp.GetSortedNodesOrFail()[0]
defer func() {
_ = FixRebootInNode(&node)
mcp.WaitForUpdatedStatus()
}()
o.Expect(BreakRebootInNode(&node)).To(o.Succeed(),
"Error breaking the reboot process in node %s", node.GetName())
logger.Infof("OK!\n")
exutil.By("Create a MC to force a reboot")
file := ign32File{
Path: filePath,
Contents: ign32Contents{
Source: GetBase64EncodedFileSourceContent(fileContent),
},
Mode: PtrInt(fileMode),
}
mc := NewMachineConfig(oc.AsAdmin(), mcName, mcp.GetName())
mc.skipWaitForMcp = true
defer mc.deleteNoWait()
mc.parameters = []string{fmt.Sprintf("FILES=[%s]", MarshalOrFail(file))}
mc.create()
logger.Infof("OK!\n")
// Check that the expected alert is fired with the right values
expectedDegradedMessage := fmt.Sprintf(`Node %s is reporting: "reboot command failed, something is seriously wrong"`,
node.GetName())
expectedAlertLabels := expectedAlertValues{"severity": o.Equal(expectedAlertSeverity)}
expectedAlertAnnotationDescription := fmt.Sprintf("Reboot failed on %s , update may be blocked. For more details: oc logs -f -n openshift-machine-config-operator machine-config-daemon",
node.GetName())
expectedAlertAnnotationSummary := "Alerts the user that a node failed to reboot one or more times over a span of 5 minutes."
expectedAlertAnnotations := expectedAlertValues{
"description": o.ContainSubstring(expectedAlertAnnotationDescription),
"summary": o.Equal(expectedAlertAnnotationSummary),
}
params := checkFiredAlertParams{
expectedAlertName: expectedAlertName,
expectedDegradedMessage: regexp.QuoteMeta(expectedDegradedMessage),
expectedAlertLabels: expectedAlertLabels,
expectedAlertAnnotations: expectedAlertAnnotations,
pendingDuration: alertFiredAfter,
// Because of OCPBUGS-5497, we need to check that the alert is already present after 15 minutes.
// We have waited 5 minutes to test the "firing" state, so we only have to wait 10 minutes more to test the 15 minutes needed since OCPBUGS-5497
stillPresentDuration: alertStillPresentAfter,
}
checkFiredAlert(oc, mcp, params)
exutil.By("Fix the reboot process in the node")
o.Expect(FixRebootInNode(&node)).To(o.Succeed(),
"Error fixing the reboot process in node %s", node.GetName())
logger.Infof("OK!\n")
checkFixedAlert(oc, mcp, expectedAlertName)
})
| |||||
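The MachineConfig in this test embeds a file through an Ignition storage entry whose contents.source is an RFC 2397 data URL. Below is a minimal sketch of how such a source string and the FILES template parameter can be built; the ignFile shape and the encoding are assumptions about what ign32File, GetBase64EncodedFileSourceContent and MarshalOrFail roughly produce, not their actual implementations.
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// ignFile mirrors (as an assumption) the fields used by the test's ign32File type.
type ignFile struct {
	Path     string `json:"path"`
	Contents struct {
		Source string `json:"source"`
	} `json:"contents"`
	Mode int `json:"mode"`
}

func main() {
	content := "test"

	var f ignFile
	f.Path = "/etc/mco-tc-63865-test.test"
	f.Mode = 420 // 420 in decimal is 0644 in octal
	// Ignition file contents are passed as an RFC 2397 data URL; base64 keeps arbitrary bytes safe.
	f.Contents.Source = "data:text/plain;charset=utf-8;base64," +
		base64.StdEncoding.EncodeToString([]byte(content))

	raw, err := json.Marshal(f)
	if err != nil {
		panic(err)
	}
	// The MachineConfig template consumes a JSON list of files through the FILES parameter.
	fmt.Printf("FILES=[%s]\n", raw)
}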
test case
|
openshift/openshift-tests-private
|
57851f09-59ee-4835-9634-959cd34dd870
|
Author:sregidor-VMonly-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-63866- [P2] MCDPivotError alert[Disruptive]
|
['"fmt"', '"regexp"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_alerts.go
|
g.It("Author:sregidor-VMonly-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-63866- [P2] MCDPivotError alert[Disruptive]", func() {
var (
mcName = "mco-tc-63866-pivot-alert"
expectedAlertName = "MCDPivotError"
alertFiredAfter = 2 * time.Minute
dockerFileCommands = `RUN echo 'Hello world' > /etc/hello-world-file`
expectedAlertSeverity = "warning"
)
// We use the master MCP because it guarantees that we are using a CoreOS node
exutil.By("Break the rpm-ostree rebase process in a node")
// We sort the CoreOS node list so that we break the node that will be updated first, making the test faster
node := sortNodeList(coMcp.GetCoreOsNodesOrFail())[0]
defer func() {
_ = FixRebaseInNode(&node)
coMcp.WaitForUpdatedStatus()
}()
o.Expect(BreakRebaseInNode(&node)).To(o.Succeed(),
"Error breaking the rpm-ostree rebase process in node %s", node.GetName())
logger.Infof("OK!\n")
// Build a new osImage that we will use to force a rebase in the broken node
exutil.By("Build new OSImage")
osImageBuilder := OsImageBuilderInNode{node: node, dockerFileCommands: dockerFileCommands}
digestedImage, err := osImageBuilder.CreateAndDigestOsImage()
o.Expect(err).NotTo(o.HaveOccurred(),
"Error creating the new osImage")
logger.Infof("OK\n")
// Create MC to force the rebase operation
exutil.By("Create a MC to deploy the new osImage")
mc := NewMachineConfig(oc.AsAdmin(), mcName, coMcp.GetName())
mc.parameters = []string{"OS_IMAGE=" + digestedImage}
mc.skipWaitForMcp = true
defer mc.deleteNoWait()
mc.create()
logger.Infof("OK\n")
// Check that the expected alert is fired with the right values
expectedDegradedMessage := fmt.Sprintf(`Node %s is reporting: "failed to update OS to %s`,
node.GetName(), digestedImage)
expectedAlertLabels := expectedAlertValues{"severity": o.Equal(expectedAlertSeverity)}
expectedAlertAnnotationDescription := fmt.Sprintf("Error detected in pivot logs on %s , upgrade may be blocked. For more details: oc logs -f -n openshift-machine-config-operator machine-config-daemon-",
node.GetName())
expectedAlertAnnotationSummary := "Alerts the user when an error is detected upon pivot. This triggers if the pivot errors are above zero for 2 minutes."
expectedAlertAnnotations := expectedAlertValues{
"description": o.ContainSubstring(expectedAlertAnnotationDescription),
"summary": o.Equal(expectedAlertAnnotationSummary),
}
params := checkFiredAlertParams{
expectedAlertName: expectedAlertName,
expectedDegradedMessage: regexp.QuoteMeta(expectedDegradedMessage),
expectedAlertLabels: expectedAlertLabels,
expectedAlertAnnotations: expectedAlertAnnotations,
pendingDuration: alertFiredAfter,
stillPresentDuration: 0, // We skip this validation to make the test faster
}
checkFiredAlert(oc, coMcp, params)
exutil.By("Fix the rpm-ostree rebase process in the node")
o.Expect(FixRebaseInNode(&node)).To(o.Succeed(),
"Error fixing the rpm-ostree rebase process in node %s", node.GetName())
logger.Infof("OK!\n")
checkFixedAlert(oc, coMcp, expectedAlertName)
})
| |||||
test case
|
openshift/openshift-tests-private
|
b948d795-e6ea-43ff-b79c-3b84444b70ef
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-62075-[OnCLayer] MCCPoolAlert. Test support for a node pool hierarchy [Disruptive]
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_alerts.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-62075-[OnCLayer] MCCPoolAlert. Test support for a node pool hierarchy [Disruptive]", func() {
var (
iMcpName = "infra"
expectedAlertName = "MCCPoolAlert"
expectedAlertSeverity = "warning"
masterNode = mMcp.GetNodesOrFail()[0]
mcc = NewController(oc.AsAdmin())
)
numMasterNodes, err := mMcp.getMachineCount()
o.Expect(err).NotTo(o.HaveOccurred(), "Cannot get the machinecount field in % MCP", mMcp.GetName())
exutil.By("Add label as infra to the existing master node")
infraLabel := "node-role.kubernetes.io/infra"
defer func() {
// Ignore the output and focus on error handling; if an error occurs, fail the test case
_, deletefailure := masterNode.DeleteLabel(infraLabel)
o.Expect(deletefailure).NotTo(o.HaveOccurred())
}()
err = masterNode.AddLabel(infraLabel, "")
o.Expect(err).NotTo(o.HaveOccurred(),
"Could not add the label %s to node %s", infraLabel, masterNode)
logger.Infof("OK!\n")
exutil.By("Create custom infra mcp")
iMcpTemplate := generateTemplateAbsolutePath("custom-machine-config-pool.yaml")
iMcp := NewMachineConfigPool(oc.AsAdmin(), iMcpName)
iMcp.template = iMcpTemplate
// We need to wait for the label to be deleted before removing the MCP. Otherwise the worker pool
// becomes Degraded.
defer func() {
_, deletefailure := masterNode.DeleteLabel(infraLabel)
// We don't fail if there is a problem because we need to delete the infra MCP
// We will try to remove the label again in the next defer section
if deletefailure != nil {
logger.Errorf("Error deleting label '%s' in node '%s'", infraLabel, masterNode.GetName())
}
_ = masterNode.WaitForLabelRemoved(infraLabel)
iMcp.delete()
}()
iMcp.create()
logger.Infof("OK!\n")
exutil.By("Check that the controller logs are reporting the conflict")
o.Eventually(
mcc.GetLogs, "5m", "10s",
).Should(o.ContainSubstring("Found master node that matches selector for custom pool %s, defaulting to master. This node will not have any custom role configuration as a result. Please review the node to make sure this is intended", iMcpName),
"The MCO controller is not reporting a machine config pool conflict in the logs")
logger.Infof("OK!\n")
exutil.By(`Check that the master node remains in the master pool and is neither moved to the "infra" pool nor removed from the master pool`)
o.Consistently(mMcp.getMachineCount, "30s", "10s").Should(o.Equal(numMasterNodes),
"The number of machines in the MCP has changed!\n%s", mMcp.PrettyString())
o.Consistently(iMcp.getMachineCount, "30s", "10s").Should(o.Equal(0),
"No node should be added to the custom pool!\n%s", iMcp.PrettyString())
logger.Infof("OK!\n")
// Check that the expected alert is fired with the right values
exutil.By(`Check that the right alert was triggered`)
expectedAlertLabels := expectedAlertValues{"severity": o.Equal(expectedAlertSeverity)}
expectedAlertAnnotationDescription := fmt.Sprintf("Node .* has triggered a pool alert due to a label change")
expectedAlertAnnotationSummary := "Triggers when nodes in a pool have overlapping labels such as master, worker, and a custom label therefore a choice must be made as to which is honored."
expectedAlertAnnotations := expectedAlertValues{
"description": o.MatchRegexp(expectedAlertAnnotationDescription),
"summary": o.Equal(expectedAlertAnnotationSummary),
}
params := checkFiredAlertParams{
expectedAlertName: expectedAlertName,
expectedAlertLabels: expectedAlertLabels,
expectedAlertAnnotations: expectedAlertAnnotations,
pendingDuration: 0,
stillPresentDuration: 0, // We skip this validation to make the test faster
}
checkFiredAlert(oc, nil, params)
logger.Infof("OK!\n")
exutil.By("Remove the label from the master node in order to fix the problem")
_, err = masterNode.DeleteLabel(infraLabel)
o.Expect(err).NotTo(o.HaveOccurred(),
"Could not delete the %s label in node %s", infraLabel, masterNode)
o.Expect(
masterNode.WaitForLabelRemoved(infraLabel),
).To(o.Succeed(),
"The label %s was not removed from node %s", infraLabel, masterNode)
logger.Infof("OK!\n")
exutil.By("Check that the alert is not triggered anymore")
checkFixedAlert(oc, coMcp, expectedAlertName)
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
09408030-1f54-474e-8b1b-7f5c875e2745
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Medium-73841-[P1][OnCLayer] KubeletHealthState alert [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_alerts.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Medium-73841-[P1][OnCLayer] KubeletHealthState alert [Disruptive]", func() {
var (
node = mcp.GetSortedNodesOrFail()[0]
fixed = false
expectedAlertName = "KubeletHealthState"
expectedAlertSeverity = "warning"
expectedAlertAnnotationDescription = "Kubelet health failure threshold reached"
expectedAlertAnnotationSummary = "This keeps track of Kubelet health failures, and tallies them. The warning is triggered if 2 or more failures occur."
)
exutil.By("Break kubelet")
// We stop the kubelet service to break the node and after 5 minutes we start it again to fix the node
go func() {
defer g.GinkgoRecover()
_, err := node.DebugNodeWithChroot("sh", "-c", "systemctl stop kubelet.service; sleep 300; systemctl start kubelet.service")
o.Expect(err).NotTo(o.HaveOccurred(),
"Error stopping and restarting kubelet in %s", node)
logger.Infof("Kubelet service has been restarted again")
fixed = true
}()
logger.Infof("OK!\n")
expectedAlertLabels := expectedAlertValues{
"severity": o.Equal(expectedAlertSeverity),
}
expectedAlertAnnotations := expectedAlertValues{
"description": o.MatchRegexp(expectedAlertAnnotationDescription),
"summary": o.Equal(expectedAlertAnnotationSummary),
}
params := checkFiredAlertParams{
expectedAlertName: expectedAlertName,
expectedAlertLabels: expectedAlertLabels,
expectedAlertAnnotations: expectedAlertAnnotations,
pendingDuration: 0,
stillPresentDuration: 0, // We skip this validation to make the test faster
}
checkFiredAlert(oc, nil, params)
exutil.By("Wait for the kubelet service to be restarted")
o.Eventually(func() bool { return fixed }, "5m", "20s").Should(o.BeTrue(), "Kubelet service was not restarted")
o.Eventually(&node).Should(HaveConditionField("Ready", "status", TrueString), "Node %s didn't become ready after kubelet was restarted", node)
logger.Infof("OK!\n")
exutil.By("Check that the alert is not triggered anymore")
checkFixedAlert(oc, mcp, expectedAlertName)
logger.Infof("OK!\n")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
3b97b88c-02bc-49a7-96bb-75d1e9369ab5
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Medium-75862-[P2][OnCLayer] Add alert for users of deprecating the Image Registry workaround [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_alerts.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Medium-75862-[P2][OnCLayer] Add alert for users of deprecating the Image Registry workaround [Disruptive]", func() {
var (
expectedAlertName = "MCODrainOverrideConfigMapAlert"
expectedAlertSeverity = "warning"
expectedAlertAnnotationDescription = "Image Registry Drain Override configmap has been detected. Please use the Node Disruption Policy feature to control the cluster's drain behavior as the configmap method is currently deprecated and will be removed in a future release."
expectedAlertAnnotationSummary = "Alerts the user to the presence of a drain override configmap that is being deprecated and removed in a future release."
overrideCMName = "image-registry-override-drain"
)
exutil.By("Create an image-registry-override-drain configmap")
defer oc.AsAdmin().Run("delete").Args("configmap", "-n", MachineConfigNamespace, overrideCMName).Execute()
o.Expect(
oc.AsAdmin().Run("create").Args("configmap", "-n", MachineConfigNamespace, overrideCMName).Execute(),
).To(o.Succeed(), "Error creating the image-registry override configmap")
logger.Infof("OK!\n")
expectedAlertLabels := expectedAlertValues{
"severity": o.Equal(expectedAlertSeverity),
}
expectedAlertAnnotations := expectedAlertValues{
"description": o.MatchRegexp(expectedAlertAnnotationDescription),
"summary": o.Equal(expectedAlertAnnotationSummary),
}
params := checkFiredAlertParams{
expectedAlertName: expectedAlertName,
expectedAlertLabels: expectedAlertLabels,
expectedAlertAnnotations: expectedAlertAnnotations,
pendingDuration: 0,
stillPresentDuration: 0, // We skip this validation to make the test faster
}
checkFiredAlert(oc, nil, params)
exutil.By("Delete the image-registry-override-drain configmap")
o.Expect(
oc.AsAdmin().Run("delete").Args("configmap", "-n", MachineConfigNamespace, overrideCMName).Execute(),
).To(o.Succeed(), "Error deleting the image-registry override configmap")
logger.Infof("OK!\n")
exutil.By("Check that the alert is not triggered anymore")
checkFixedAlert(oc, mcp, expectedAlertName)
logger.Infof("OK!\n")
})
| ||||||
test
|
openshift/openshift-tests-private
|
2f805801-dbaf-4b2f-b652-6110512d27e4
|
mco_bootimages
|
import (
"fmt"
"regexp"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/openshift/openshift-tests-private/test/extended/util/architecture"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
"github.com/tidwall/gjson"
"github.com/tidwall/sjson"
)
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_bootimages.go
|
package mco
import (
"fmt"
"regexp"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/openshift/openshift-tests-private/test/extended/util/architecture"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
"github.com/tidwall/gjson"
"github.com/tidwall/sjson"
)
var _ = g.Describe("[sig-mco] MCO Bootimages", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("mco-scale", exutil.KubeConfigPath())
// worker MachineConfigPool
wMcp *MachineConfigPool
machineConfiguration *MachineConfiguration
)
g.JustBeforeEach(func() {
// Skip if no machineset
skipTestIfWorkersCannotBeScaled(oc.AsAdmin())
// Bootimages Update functionality is only available in GCP(GA) and AWS(GA)
skipTestIfSupportedPlatformNotMatched(oc, GCPPlatform, AWSPlatform)
IsFeaturegateEnabled(oc, "ManagedBootImages")
IsFeaturegateEnabled(oc, "ManagedBootImagesAWS")
wMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
machineConfiguration = GetMachineConfiguration(oc.AsAdmin())
preChecks(oc)
})
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Medium-74238-[P1][OnCLayer] BootImages not updated by default [Disruptive]", func() {
var (
fakeImageName = "fake-coreos-bootimage-name"
duplicatedMachinesetName = "cloned-tc-74238"
firstMachineSet = NewMachineSetList(oc.AsAdmin(), MachineAPINamespace).GetAllOrFail()[0]
)
exutil.By("Remove ManagedBootImages section from MachineConfiguration resource")
defer machineConfiguration.SetSpec(machineConfiguration.GetSpecOrFail())
o.Expect(
machineConfiguration.RemoveManagedBootImagesConfig(),
).To(o.Succeed(), "Error configuring an empty managedBootImage in the 'cluster' MachineConfiguration resource")
logger.Infof("OK!\n")
exutil.By("Duplicate machineset for testing")
machineSet, dErr := firstMachineSet.Duplicate(duplicatedMachinesetName)
o.Expect(dErr).NotTo(o.HaveOccurred(), "Error duplicating %s", machineSet)
defer machineSet.Delete()
logger.Infof("OK!\n")
exutil.By("Patch coreos boot image in MachineSet")
o.Expect(machineSet.SetCoreOsBootImage(fakeImageName)).To(o.Succeed(),
"Error patching the value of the coreos boot image in %s", machineSet)
logger.Infof("OK!\n")
exutil.By("Check that the MachineSet is not updated by MCO by default")
o.Consistently(machineSet.GetCoreOsBootImage, "3m", "20s").Should(o.Equal(fakeImageName),
"The machineset should not be updated by MCO if the functionality is not enabled in the MachineConfiguration resource. %s", machineSet.PrettyString())
logger.Infof("OK!\n")
})
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Medium-74240-[P2][OnCLayer] ManagedBootImages. Restore All MachineSet images [Disruptive]", func() {
var (
fakeImageName = "fake-coreos-bootimage-name"
coreosBootimagesCM = NewConfigMap(oc.AsAdmin(), MachineConfigNamespace, "coreos-bootimages")
machineSet = NewMachineSetList(oc.AsAdmin(), MachineAPINamespace).GetAllOrFail()[0]
clonedMSName = "cloned-tc-74240"
clonedWrongBootImageMSName = "cloned-tc-74240-wrong-boot-image"
clonedOwnedMSName = "cloned-tc-74240-owned"
platform = exutil.CheckPlatform(oc)
region = getCurrentRegionOrFail(oc)
)
exutil.By("Opt-in boot images update")
defer machineConfiguration.SetSpec(machineConfiguration.GetSpecOrFail())
o.Expect(
machineConfiguration.SetAllManagedBootImagesConfig(),
).To(o.Succeed(), "Error configuring ALL managedBootImages in the 'cluster' MachineConfiguration resource")
logger.Infof("OK!\n")
exutil.By("Clone first machineset")
clonedMS, err := machineSet.Duplicate(clonedMSName)
defer clonedMS.Delete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error duplicating %s", machineSet)
logger.Infof("OK!\n")
exutil.By("Clone first machineset but using a wrong ")
clonedWrongImageMS, err := DuplicateMachineSetWithCustomBootImage(machineSet, fakeImageName, clonedWrongBootImageMSName)
defer clonedWrongImageMS.Delete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error duplicating %s using a custom boot image", machineSet)
logger.Infof("OK!\n")
exutil.By("Clone first machineset and set an owner for the cloned machineset")
logger.Infof("Cloning machineset")
clonedOwnedMS, err := machineSet.Duplicate(clonedOwnedMSName)
defer clonedOwnedMS.Delete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error duplicating %s", machineSet)
logger.Infof("Setting a fake owner")
o.Expect(
clonedOwnedMS.Patch("merge", `{"metadata":{"ownerReferences": [{"apiVersion": "fake","blockOwnerDeletion": true,"controller": true,"kind": "fakekind","name": "master","uid": "fake-uuid"}]}}`),
).To(o.Succeed(), "Error patching %s with a fake owner", clonedOwnedMS)
logger.Infof("OK!\n")
exutil.By("All machinesets should use the right boot image")
for _, ms := range NewMachineSetList(oc.AsAdmin(), MachineAPINamespace).GetAllOrFail() {
logger.Infof("Checking boot image in machineset %s", ms.GetName())
currentCoreOsBootImage := getCoreOsBootImageFromConfigMapOrFail(platform, region, *machineSet.GetArchitectureOrFail(), coreosBootimagesCM)
logger.Infof("Current coreOsBootImage: %s", currentCoreOsBootImage)
o.Eventually(ms.GetCoreOsBootImage, "5m", "20s").Should(o.ContainSubstring(currentCoreOsBootImage),
"%s was NOT updated to use the right boot image", ms)
}
logger.Infof("OK!\n")
exutil.By("Patch cloned machinesets to use a wrong boot image")
o.Expect(clonedMS.SetCoreOsBootImage(fakeImageName)).To(o.Succeed(),
"Error setting a new boot image in %s", clonedMS)
o.Expect(clonedWrongImageMS.SetCoreOsBootImage(fakeImageName)).To(o.Succeed(),
"Error setting a new boot image in %s", clonedWrongImageMS)
o.Expect(clonedOwnedMS.SetCoreOsBootImage(fakeImageName)).To(o.Succeed(),
"Error setting a new boot image in %s", clonedOwnedMS)
logger.Infof("OK!\n")
exutil.By("All machinesets should use the right boot image except the one with an owner")
for _, ms := range NewMachineSetList(oc.AsAdmin(), MachineAPINamespace).GetAllOrFail() {
logger.Infof("Checking boot image in machineset %s", ms.GetName())
currentCoreOsBootImage := getCoreOsBootImageFromConfigMapOrFail(platform, region, *machineSet.GetArchitectureOrFail(), coreosBootimagesCM)
logger.Infof("Current coreOsBootImage: %s", currentCoreOsBootImage)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the currently configured coreos boot image")
if ms.GetName() == clonedOwnedMSName {
o.Consistently(ms.GetCoreOsBootImage, "15s", "5s").Should(o.Equal(fakeImageName),
"%s was patched and it is using the right boot image. Machinesets with owners should NOT be patched.", ms)
} else {
o.Eventually(ms.GetCoreOsBootImage, "5m", "20s").Should(o.ContainSubstring(currentCoreOsBootImage),
"%s was NOT updated to use the right boot image", ms)
o.Eventually(ms.GetUserDataSecret, "1m", "20s").Should(o.ContainSubstring("worker-user-data-managed"),
"%s was NOT updated to use the right user-data secret", ms)
}
}
logger.Infof("OK!\n")
exutil.By("Scale up one of the fixed machinesets to make sure that they are working fine")
logger.Infof("Scaling up machineset %s", clonedMS.GetName())
defer wMcp.waitForComplete()
defer clonedMS.ScaleTo(0)
o.Expect(clonedMS.ScaleTo(1)).To(o.Succeed(),
"Error scaling up MachineSet %s", clonedMS.GetName())
logger.Infof("Waiting %s machineset for being ready", clonedMS)
o.Eventually(clonedMS.GetIsReady, "20m", "2m").Should(o.BeTrue(), "MachineSet %s is not ready", clonedMS.GetName())
logger.Infof("OK!\n")
})
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Medium-74239-[OnCLayer] ManagedBootImages. Restore Partial MachineSet images [Disruptive]", func() {
var (
fakeImageName = "fake-coreos-bootimage-name"
coreosBootimagesCM = NewConfigMap(oc.AsAdmin(), MachineConfigNamespace, "coreos-bootimages")
machineSet = NewMachineSetList(oc.AsAdmin(), MachineAPINamespace).GetAllOrFail()[0]
clonedMSLabelName = "cloned-tc-74239-label"
clonedMSNoLabelName = "cloned-tc-74239-no-label"
clonedMSLabelOwnedName = "cloned-tc-74239-label-owned"
labelName = "test"
labelValue = "update"
platform = exutil.CheckPlatform(oc)
region = getCurrentRegionOrFail(oc)
)
exutil.By("Opt-in boot images update")
defer machineConfiguration.SetSpec(machineConfiguration.GetSpecOrFail())
o.Expect(
machineConfiguration.SetPartialManagedBootImagesConfig(labelName, labelValue),
).To(o.Succeed(), "Error configuring Partial managedBootImages in the 'cluster' MachineConfiguration resource")
logger.Infof("OK!\n")
exutil.By("Clone the first machineset twice")
clonedMSLabel, err := machineSet.Duplicate(clonedMSLabelName)
defer clonedMSLabel.Delete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error duplicating %s", machineSet)
clonedMSNoLabel, err := machineSet.Duplicate(clonedMSNoLabelName)
defer clonedMSNoLabel.Delete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error duplicating %s", machineSet)
logger.Infof("OK!\n")
exutil.By("Clone first machineset again and set an owner for the cloned machineset")
logger.Infof("Cloning machineset")
clonedMSLabelOwned, err := machineSet.Duplicate(clonedMSLabelOwnedName)
defer clonedMSLabelOwned.Delete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error duplicating %s", machineSet)
logger.Infof("Setting a fake owner")
o.Expect(
clonedMSLabelOwned.Patch("merge", `{"metadata":{"ownerReferences": [{"apiVersion": "fake","blockOwnerDeletion": true,"controller": true,"kind": "fakekind","name": "master","uid": "fake-uuid"}]}}`),
).To(o.Succeed(), "Error patching %s with a fake owner", clonedMSLabelOwned)
logger.Infof("OK!\n")
exutil.By("Label one of the cloned images and the clonned image with the owner configuration")
o.Expect(clonedMSLabel.AddLabel(labelName, labelValue)).To(o.Succeed(),
"Error labeling %s", clonedMSLabel)
o.Expect(clonedMSLabelOwned.AddLabel(labelName, labelValue)).To(o.Succeed(),
"Error labeling %s", clonedMSLabel)
logger.Infof("OK!\n")
exutil.By("Patch the clonned machineset to configure a new boot image")
o.Expect(clonedMSLabel.SetCoreOsBootImage(fakeImageName)).To(o.Succeed(),
"Error setting a new boot image in %s", clonedMSLabel)
o.Expect(clonedMSNoLabel.SetCoreOsBootImage(fakeImageName)).To(o.Succeed(),
"Error setting a new boot image in %s", clonedMSNoLabel)
o.Expect(clonedMSLabelOwned.SetCoreOsBootImage(fakeImageName)).To(o.Succeed(),
"Error setting a new boot image in %s", clonedMSLabelOwned)
logger.Infof("OK!\n")
exutil.By("The labeled machineset without owner should be updated")
currentCoreOsBootImage := getCoreOsBootImageFromConfigMapOrFail(platform, region, *machineSet.GetArchitectureOrFail(), coreosBootimagesCM)
logger.Infof("Current coreOsBootImage: %s", currentCoreOsBootImage)
o.Eventually(clonedMSLabel.GetCoreOsBootImage, "5m", "20s").Should(o.ContainSubstring(currentCoreOsBootImage),
"%s was NOT updated to use the right boot image", clonedMSLabel)
o.Eventually(clonedMSLabel.GetUserDataSecret, "1m", "20s").Should(o.ContainSubstring("worker-user-data-managed"),
"%s was NOT updated to use the right user-data secret", clonedMSLabel)
logger.Infof("OK!\n")
exutil.By("The labeled machineset with owner should NOT be updated")
o.Consistently(clonedMSLabelOwned.GetCoreOsBootImage, "15s", "5s").Should(o.Equal(fakeImageName),
"%s was patched and it is using the right boot image. Machinesets with owners should NOT be patched.", clonedMSLabelOwned)
logger.Infof("OK!\n")
exutil.By("The machineset without label should NOT be updated")
o.Consistently(clonedMSNoLabel.GetCoreOsBootImage, "15s", "5s").Should(o.Equal(fakeImageName),
"%s was patched and it is using the right boot image. Machinesets with owners should NOT be patched.", clonedMSNoLabel)
logger.Infof("OK!\n")
exutil.By("Scale up the fixed machinessetset to make sure that it is working fine")
logger.Infof("Scaling up machineset %s", clonedMSLabel.GetName())
defer wMcp.waitForComplete()
defer clonedMSLabel.ScaleTo(0)
o.Expect(clonedMSLabel.ScaleTo(1)).To(o.Succeed(),
"Error scaling up MachineSet %s", clonedMSLabel.GetName())
logger.Infof("Waiting %s machineset for being ready", clonedMSLabel)
o.Eventually(clonedMSLabel.GetIsReady, "20m", "2m").Should(o.BeTrue(), "MachineSet %s is not ready", clonedMSLabel.GetName())
logger.Infof("OK!\n")
})
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Medium-74764-[P1][OnCLayer] ManagedBootImages. Delete machineset when error [Disruptive]", func() {
var (
machineConfiguration = GetMachineConfiguration(oc.AsAdmin())
machineSet = NewMachineSetList(oc.AsAdmin(), MachineAPINamespace).GetAllOrFail()[0]
clonedMSName = "cloned-tc-74764-copy"
labelName = "test"
labelValue = "update"
expectedFailedMessageRegexp = regexp.QuoteMeta("Error(s): error syncing MAPI MachineSet " +
clonedMSName +
": unexpected OwnerReference: fakekind/master. Please remove this machineset from boot image management to avoid errors")
expectedFailedProgressMessage = "Reconciled 0 of 1 MAPI MachineSets | Reconciled 0 of 0 CAPI MachineSets | Reconciled 0 of 0 CAPI MachineDeployments"
expectedOKMessage = "0 Degraded MAPI MachineSets | 0 Degraded CAPI MachineSets | 0 CAPI MachineDeployments"
)
exutil.By("Opt-in boot images update")
defer machineConfiguration.SetSpec(machineConfiguration.GetSpecOrFail())
o.Expect(
machineConfiguration.SetPartialManagedBootImagesConfig(labelName, labelValue),
).To(o.Succeed(), "Error configuring Partial managedBootImages in the 'cluster' MachineConfiguration resource")
logger.Infof("OK!\n")
exutil.By("Clone the first machineset")
clonedMS, err := machineSet.Duplicate(clonedMSName)
o.Expect(err).NotTo(o.HaveOccurred(), "Error duplicating %s", machineSet)
defer clonedMS.Delete()
logger.Infof("OK!\n")
exutil.By("Add a fake owner to the new cloned machineset")
o.Expect(
clonedMS.Patch("merge", `{"metadata":{"ownerReferences": [{"apiVersion": "fake","blockOwnerDeletion": true,"controller": true,"kind": "fakekind","name": "master","uid": "fake-uuid"}]}}`),
).To(o.Succeed(), "Error patching %s with a fake owner", clonedMS)
logger.Infof("OK!\n")
exutil.By("Label the cloned machineset so that its boot image is updated by MCO")
o.Expect(clonedMS.AddLabel(labelName, labelValue)).To(o.Succeed(),
"Error labeling %s", clonedMS)
logger.Infof("OK!\n")
exutil.By("Check that an error is reported in the machineconfiguration resource")
o.Eventually(machineConfiguration, "5m", "10s").Should(HaveConditionField("BootImageUpdateDegraded", "status", "True"),
"Expected %s to be BootImageUpdateDegraded.\n%s", machineConfiguration.PrettyString())
o.Eventually(machineConfiguration, "5m", "10s").Should(HaveConditionField("BootImageUpdateDegraded", "message", o.MatchRegexp(expectedFailedMessageRegexp)),
"Expected %s to be BootImageUpdateDegraded.\n%s", machineConfiguration.PrettyString())
logger.Infof("OK!\n")
exutil.By("Check reported progress in machineconfiguration resource")
o.Eventually(machineConfiguration, "5m", "20s").Should(HaveConditionField("BootImageUpdateProgressing", "message", expectedFailedProgressMessage),
"Progress message is not the expected one.\n%s", machineConfiguration.PrettyString())
o.Eventually(machineConfiguration, "5m", "20s").Should(HaveConditionField("BootImageUpdateProgressing", "status", "False"),
"Progress status is not the expected one.\n%s", machineConfiguration.PrettyString())
logger.Infof("OK!\n")
exutil.By("Delete the new cloned machineset")
o.Expect(clonedMS.Delete()).To(o.Succeed(), "Error deleting %s", clonedMS)
logger.Infof("OK!\n")
exutil.By("Check the machineconfiguration resource is not reporting errors anymore and the progress is OK")
o.Eventually(machineConfiguration, "5m", "20s").Should(HaveConditionField("BootImageUpdateDegraded", "status", "False"),
"Expected %s to be BootImageUpdateDegraded.\n%s", machineConfiguration.PrettyString())
o.Eventually(machineConfiguration, "5m", "20s").Should(HaveConditionField("BootImageUpdateDegraded", "message", expectedOKMessage),
"Expected %s to be BootImageUpdateDegraded.\n%s", machineConfiguration.PrettyString())
o.Eventually(machineConfiguration, "5m", "20s").Should(HaveConditionField("BootImageUpdateProgressing", "status", "False"),
"Progress status is not the expected one.\n%s", machineConfiguration.PrettyString())
logger.Infof("OK!\n")
})
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Medium-74751-[P2][OnCLayer] ManagedBootImages. Fix errors [Disruptive]", func() {
var (
coreosBootimagesCM = NewConfigMap(oc.AsAdmin(), MachineConfigNamespace, "coreos-bootimages")
machineConfiguration = GetMachineConfiguration(oc.AsAdmin())
machineSet = NewMachineSetList(oc.AsAdmin(), MachineAPINamespace).GetAllOrFail()[0]
clonedMSName = "cloned-tc-74751-copy"
labelName = "test"
labelValue = "update"
fakearch = "fake-arch"
expectedFailedMessageRegexp = regexp.QuoteMeta("Error(s): error syncing MAPI MachineSet " +
clonedMSName +
": failed to fetch arch during machineset sync: invalid architecture value found in annotation: kubernetes.io/arch=" + fakearch)
platform = exutil.CheckPlatform(oc)
region = getCurrentRegionOrFail(oc)
)
exutil.By("Opt-in boot images update")
defer machineConfiguration.SetSpec(machineConfiguration.GetSpecOrFail())
o.Expect(
machineConfiguration.SetPartialManagedBootImagesConfig(labelName, labelValue),
).To(o.Succeed(), "Error configuring Partial managedBootImages in the 'cluster' MachineConfiguration resource")
logger.Infof("OK!\n")
exutil.By("Clone the first machineset")
clonedMS, err := machineSet.Duplicate(clonedMSName)
o.Expect(err).NotTo(o.HaveOccurred(), "Error duplicating %s", machineSet)
defer clonedMS.Delete()
logger.Infof("OK!\n")
exutil.By("Set a wrong architecture in the cloned image")
o.Expect(clonedMS.SetArchitecture(fakearch)).To(o.Succeed(), "Error setting a fake architecture in %s", clonedMS)
logger.Infof("OK!\n")
exutil.By("Set a wrong boot image in the cloned image")
o.Expect(clonedMS.SetCoreOsBootImage("fake-image")).To(o.Succeed(), "Error setting a fake boot image in %s", clonedMS)
logger.Infof("OK!\n")
exutil.By("Check that no failures are being reported")
o.Eventually(machineConfiguration, "5m", "20s").Should(HaveConditionField("BootImageUpdateDegraded", "status", "False"),
"Expected %s not to be BootImageUpdateDegraded.\n%s", machineConfiguration.PrettyString())
o.Eventually(machineConfiguration, "5m", "20s").Should(HaveConditionField("BootImageUpdateProgressing", "status", "False"),
"Expected %s not to be BootImageUpdateDegraded.\n%s", machineConfiguration.PrettyString())
logger.Infof("OK!\n")
exutil.By("Label the cloned machineset so that its boot image is updated by MCO")
o.Expect(clonedMS.AddLabel(labelName, labelValue)).To(o.Succeed(),
"Error labeling %s", clonedMS)
logger.Infof("OK!\n")
exutil.By("Check that an error is reported in the machineconfiguration resource and that there is no progress")
o.Eventually(machineConfiguration, "5m", "20s").Should(HaveConditionField("BootImageUpdateDegraded", "status", "True"),
"Expected %s to be BootImageUpdateDegraded.\n%s", machineConfiguration.PrettyString())
o.Eventually(machineConfiguration, "5m", "20s").Should(HaveConditionField("BootImageUpdateDegraded", "message", o.MatchRegexp(expectedFailedMessageRegexp)),
"Expected %s to be BootImageUpdateDegraded.\n%s", machineConfiguration.PrettyString())
o.Eventually(machineConfiguration, "5m", "20s").Should(HaveConditionField("BootImageUpdateProgressing", "status", "False"),
"Progress status is not the expected one.\n%s", machineConfiguration.PrettyString())
// since it will be in "progressing" status for a very short time, we cant poll the value. We need to use the lasttransition date
lastProgressTransition := machineConfiguration.GetOrFail(`{.status.conditions[?(@.type=="BootImageUpdateProgressing")].lastTransitionTime}`)
logger.Infof("OK!\n")
exutil.By("Set the right architecture in the cloneed machineset")
o.Expect(clonedMS.SetArchitecture(machineSet.GetArchitectureOrFail().String())).To(o.Succeed(), "Error fixing the problem in the architecture in %s", clonedMS)
logger.Infof("OK!\n")
exutil.By("Check that no error is reported anymore in the machineconfiguration resource and the progress was OK")
// We need to poll frequently since it will be in "progressing" status for a very short time
o.Eventually(machineConfiguration, "20s", "1s").ShouldNot(HaveConditionField("BootImageUpdateProgressing", "lastTransitionTime", lastProgressTransition),
"Progress status did not change, but it should have been moved to 'true' and back to 'false' .\n%s", machineConfiguration.PrettyString())
o.Eventually(machineConfiguration, "2m", "10s").Should(HaveConditionField("BootImageUpdateProgressing", "status", "False"),
"Progress status is not the expected one.\n%s", machineConfiguration.PrettyString())
o.Eventually(machineConfiguration, "5m", "20s").Should(HaveConditionField("BootImageUpdateDegraded", "status", "False"),
"Expected %s to be BootImageUpdateDegraded.\n%s", machineConfiguration.PrettyString())
logger.Infof("OK!\n")
exutil.By("Check that the boot image was updated")
currentCoreOsBootImage := getCoreOsBootImageFromConfigMapOrFail(platform, region, *machineSet.GetArchitectureOrFail(), coreosBootimagesCM)
logger.Infof("Current coreOsBootImage: %s", currentCoreOsBootImage)
o.Eventually(clonedMS.GetCoreOsBootImage, "5m", "20s").Should(o.ContainSubstring(currentCoreOsBootImage),
"%s was NOT updated to use the right boot image", clonedMS)
logger.Infof("OK!\n")
})
})
func DuplicateMachineSetWithCustomBootImage(ms MachineSet, newBootImage, newName string) (*MachineSet, error) {
var (
platform = exutil.CheckPlatform(ms.GetOC().AsAdmin())
coreOSBootImagePath = GetCoreOSBootImagePath(platform)
)
// Patch is given like /spec/template/spec/providerSpec/value/ami/id
// but the sjson library needs the path like spec.template.spec.providerSpec.value.ami.id
// so we transform the string
jsonCoreOSBootImagePath := strings.ReplaceAll(strings.TrimPrefix(coreOSBootImagePath, "/"), "/", ".")
res, err := CloneResource(&ms, newName, ms.GetNamespace(),
// Extra modifications to
// 1. Create the resource with 0 replicas
// 2. modify the selector matchLabels
// 3. modify the selector template metadata labels
// 4. set the provided boot image
func(resString string) (string, error) {
newResString, err := sjson.Set(resString, "spec.replicas", 0)
if err != nil {
return "", err
}
newResString, err = sjson.Set(newResString, `spec.selector.matchLabels.machine\.openshift\.io/cluster-api-machineset`, newName)
if err != nil {
return "", err
}
newResString, err = sjson.Set(newResString, `spec.template.metadata.labels.machine\.openshift\.io/cluster-api-machineset`, newName)
if err != nil {
return "", err
}
newResString, err = sjson.Set(newResString, jsonCoreOSBootImagePath, newBootImage)
if err != nil {
return "", err
}
return newResString, nil
},
)
if err != nil {
return nil, err
}
logger.Infof("A new machineset %s has been created by cloning %s", res.GetName(), ms.GetName())
return NewMachineSet(ms.oc, res.GetNamespace(), res.GetName()), nil
}
// getCoreOsBootImageFromConfigMap looks for the configured CoreOS boot image in the given configmap
func getCoreOsBootImageFromConfigMap(platform, region string, arch architecture.Architecture, coreosBootimagesCM *ConfigMap) (string, error) {
var (
coreOsBootImagePath string
// transform amd64 naming to x86_64 naming
stringArch = convertArch(arch)
)
logger.Infof("Looking for coreos boot image for architecture %s in %s", stringArch, coreosBootimagesCM)
streamJSON, err := coreosBootimagesCM.GetDataValue("stream")
if err != nil {
return "", err
}
parsedStream := gjson.Parse(streamJSON)
switch platform {
case AWSPlatform:
if region == "" {
return "", fmt.Errorf("Region is empty for platform %s. The region is mandatory if we want to get the boot image value", platform)
}
coreOsBootImagePath = fmt.Sprintf(`.architectures.%s.images.%s.regions."%s".image`, stringArch, platform, region)
case GCPPlatform:
coreOsBootImagePath = fmt.Sprintf(`architectures.%s.images.%s.name`, stringArch, platform)
default:
return "", fmt.Errorf("Machineset.GetCoreOsBootImage method is only supported for GCP and AWS platforms")
}
currentCoreOsBootImage := parsedStream.Get(coreOsBootImagePath).String()
if currentCoreOsBootImage == "" {
logger.Warnf("The coreos boot image for architecture %s in %s IS EMPTY", stringArch, coreosBootimagesCM)
}
return currentCoreOsBootImage, nil
}
func getCoreOsBootImageFromConfigMapOrFail(platform, region string, arch architecture.Architecture, coreosBootimagesCM *ConfigMap) string {
image, err := getCoreOsBootImageFromConfigMap(platform, region, arch, coreosBootimagesCM)
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the boot image from %s for platform %s and arch %s", coreosBootimagesCM, platform, arch)
return image
}
|
package mco
| ||||
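Several tests in the file above attach a fake owner to a MachineSet by passing a hand-written JSON merge patch string to Patch("merge", ...). The same patch body can be built programmatically, which avoids quoting mistakes; the sketch below is only illustrative and reuses the same fake field values as the tests.
package main

import (
	"encoding/json"
	"fmt"
)

// ownerRef mirrors the subset of metav1.OwnerReference fields used in the tests' patch.
type ownerRef struct {
	APIVersion         string `json:"apiVersion"`
	BlockOwnerDeletion bool   `json:"blockOwnerDeletion"`
	Controller         bool   `json:"controller"`
	Kind               string `json:"kind"`
	Name               string `json:"name"`
	UID                string `json:"uid"`
}

func main() {
	patch := map[string]interface{}{
		"metadata": map[string]interface{}{
			"ownerReferences": []ownerRef{{
				APIVersion:         "fake",
				BlockOwnerDeletion: true,
				Controller:         true,
				Kind:               "fakekind",
				Name:               "master",
				UID:                "fake-uuid",
			}},
		},
	}
	body, err := json.Marshal(patch)
	if err != nil {
		panic(err)
	}
	// body is the JSON merge patch document that the tests pass as Patch("merge", body).
	fmt.Println(string(body))
}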
function
|
openshift/openshift-tests-private
|
8ebcccce-b317-4ab0-8abe-bf2c84c2bf53
|
DuplicateMachineSetWithCustomBootImage
|
['"strings"', '"github.com/tidwall/sjson"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_bootimages.go
|
func DuplicateMachineSetWithCustomBootImage(ms MachineSet, newBootImage, newName string) (*MachineSet, error) {
var (
platform = exutil.CheckPlatform(ms.GetOC().AsAdmin())
coreOSBootImagePath = GetCoreOSBootImagePath(platform)
)
// Patch is given like /spec/template/spec/providerSpec/value/ami/id
// but the sjson library needs the path like spec.template.spec.providerSpec.value.ami.id
// so we transform the string
jsonCoreOSBootImagePath := strings.ReplaceAll(strings.TrimPrefix(coreOSBootImagePath, "/"), "/", ".")
res, err := CloneResource(&ms, newName, ms.GetNamespace(),
// Extra modifications to
// 1. Create the resource with 0 replicas
// 2. modify the selector matchLabels
// 3. modify the selector template metadata labels
// 4. set the provided boot image
func(resString string) (string, error) {
newResString, err := sjson.Set(resString, "spec.replicas", 0)
if err != nil {
return "", err
}
newResString, err = sjson.Set(newResString, `spec.selector.matchLabels.machine\.openshift\.io/cluster-api-machineset`, newName)
if err != nil {
return "", err
}
newResString, err = sjson.Set(newResString, `spec.template.metadata.labels.machine\.openshift\.io/cluster-api-machineset`, newName)
if err != nil {
return "", err
}
newResString, err = sjson.Set(newResString, jsonCoreOSBootImagePath, newBootImage)
if err != nil {
return "", err
}
return newResString, nil
},
)
if err != nil {
return nil, err
}
logger.Infof("A new machineset %s has been created by cloning %s", res.GetName(), ms.GetName())
return NewMachineSet(ms.oc, res.GetNamespace(), res.GetName()), nil
}
|
mco
| ||||
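DuplicateMachineSetWithCustomBootImage converts a JSON-Pointer-style path (/spec/.../ami/id) into the dotted notation that sjson expects and escapes literal dots inside label keys with a backslash. Below is a standalone sketch of both conversions; the trimmed-down MachineSet JSON and the values are illustrative only.
package main

import (
	"fmt"
	"strings"

	"github.com/tidwall/sjson"
)

func main() {
	// JSON-Pointer-style path, as returned by a helper like GetCoreOSBootImagePath for AWS.
	pointerPath := "/spec/template/spec/providerSpec/value/ami/id"
	// sjson expects "spec.template.spec.providerSpec.value.ami.id".
	dottedPath := strings.ReplaceAll(strings.TrimPrefix(pointerPath, "/"), "/", ".")

	// Trimmed-down, illustrative MachineSet document.
	doc := `{"spec":{"template":{"spec":{"providerSpec":{"value":{"ami":{"id":"ami-old"}}}}}}}`

	doc, err := sjson.Set(doc, dottedPath, "ami-new")
	if err != nil {
		panic(err)
	}
	// Dots inside a key must be escaped with a backslash so sjson treats them as part
	// of the key instead of path separators; missing intermediate objects are created.
	doc, err = sjson.Set(doc, `spec.selector.matchLabels.machine\.openshift\.io/cluster-api-machineset`, "cloned-ms")
	if err != nil {
		panic(err)
	}
	fmt.Println(doc)
}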
function
|
openshift/openshift-tests-private
|
8994b8cc-389f-4985-b0c3-72214af090ed
|
getCoreOsBootImageFromConfigMap
|
['"fmt"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"', '"github.com/tidwall/gjson"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_bootimages.go
|
func getCoreOsBootImageFromConfigMap(platform, region string, arch architecture.Architecture, coreosBootimagesCM *ConfigMap) (string, error) {
var (
coreOsBootImagePath string
// transform amd64 naming to x86_64 naming
stringArch = convertArch(arch)
)
logger.Infof("Looking for coreos boot image for architecture %s in %s", stringArch, coreosBootimagesCM)
streamJSON, err := coreosBootimagesCM.GetDataValue("stream")
if err != nil {
return "", err
}
parsedStream := gjson.Parse(streamJSON)
switch platform {
case AWSPlatform:
if region == "" {
return "", fmt.Errorf("Region is empty for platform %s. The region is mandatory if we want to get the boot image value", platform)
}
coreOsBootImagePath = fmt.Sprintf(`.architectures.%s.images.%s.regions."%s".image`, stringArch, platform, region)
case GCPPlatform:
coreOsBootImagePath = fmt.Sprintf(`architectures.%s.images.%s.name`, stringArch, platform)
default:
return "", fmt.Errorf("Machineset.GetCoreOsBootImage method is only supported for GCP and AWS platforms")
}
currentCoreOsBootImage := parsedStream.Get(coreOsBootImagePath).String()
if currentCoreOsBootImage == "" {
logger.Warnf("The coreos boot image for architecture %s in %s IS EMPTY", stringArch, coreosBootimagesCM)
}
return currentCoreOsBootImage, nil
}
|
mco
| ||||
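getCoreOsBootImageFromConfigMap reads the boot image out of the configmap's stream document with a gjson path built per platform, architecture and (for AWS) region. Below is a standalone sketch of that kind of lookup; the stream payload is a trimmed, illustrative shape, not the real configmap content.
package main

import (
	"fmt"

	"github.com/tidwall/gjson"
)

func main() {
	// Illustrative subset of the "stream" document shape; the real configmap holds much more data.
	stream := `{
	  "architectures": {
	    "x86_64": {
	      "images": {
	        "gcp": {"name": "rhcos-417-x86-64-illustrative"},
	        "aws": {"regions": {"us-east-1": {"image": "ami-0123456789abcdef0"}}}
	      }
	    }
	  }
	}`

	// GCP image name: a plain dotted path.
	fmt.Println("gcp:", gjson.Get(stream, "architectures.x86_64.images.gcp.name").String())

	// AWS image for a region: the region becomes one more path component.
	arch, platform, region := "x86_64", "aws", "us-east-1"
	awsPath := fmt.Sprintf("architectures.%s.images.%s.regions.%s.image", arch, platform, region)
	fmt.Println("aws:", gjson.Get(stream, awsPath).String())

	// A missing key simply yields an empty result, which is why the helper warns when it gets "".
	fmt.Println("missing is empty:", gjson.Get(stream, "architectures.aarch64.images.gcp.name").String() == "")
}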
function
|
openshift/openshift-tests-private
|
4617fcc4-efab-4085-a9f7-daeb1a678e34
|
getCoreOsBootImageFromConfigMapOrFail
|
['"github.com/openshift/openshift-tests-private/test/extended/util/architecture"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_bootimages.go
|
func getCoreOsBootImageFromConfigMapOrFail(platform, region string, arch architecture.Architecture, coreosBootimagesCM *ConfigMap) string {
image, err := getCoreOsBootImageFromConfigMap(platform, region, arch, coreosBootimagesCM)
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the boot image from %s for platform %s and arch %s", coreosBootimagesCM, platform, arch)
return image
}
|
mco
| ||||
test case
|
openshift/openshift-tests-private
|
dddf1f21-85b6-4be3-a733-8f9fed860b2e
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Medium-74238-[P1][OnCLayer] BootImages not updated by default [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_bootimages.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Medium-74238-[P1][OnCLayer] BootImages not updated by default [Disruptive]", func() {
var (
fakeImageName = "fake-coreos-bootimage-name"
duplicatedMachinesetName = "cloned-tc-74238"
firstMachineSet = NewMachineSetList(oc.AsAdmin(), MachineAPINamespace).GetAllOrFail()[0]
)
exutil.By("Remove ManagedBootImages section from MachineConfiguration resource")
defer machineConfiguration.SetSpec(machineConfiguration.GetSpecOrFail())
o.Expect(
machineConfiguration.RemoveManagedBootImagesConfig(),
).To(o.Succeed(), "Error configuring an empty managedBootImage in the 'cluster' MachineConfiguration resource")
logger.Infof("OK!\n")
exutil.By("Duplicate machineset for testing")
machineSet, dErr := firstMachineSet.Duplicate(duplicatedMachinesetName)
o.Expect(dErr).NotTo(o.HaveOccurred(), "Error duplicating %s", machineSet)
defer machineSet.Delete()
logger.Infof("OK!\n")
exutil.By("Patch coreos boot image in MachineSet")
o.Expect(machineSet.SetCoreOsBootImage(fakeImageName)).To(o.Succeed(),
"Error patching the value of the coreos boot image in %s", machineSet)
logger.Infof("OK!\n")
exutil.By("Check that the MachineSet is not updated by MCO by default")
o.Consistently(machineSet.GetCoreOsBootImage, "3m", "20s").Should(o.Equal(fakeImageName),
"The machineset should not be updated by MCO if the functionality is not enabled in the MachineConfiguration resource. %s", machineSet.PrettyString())
logger.Infof("OK!\n")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
1abecfba-d29c-4fc1-b94c-582eb939af0c
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Medium-74240-[P2][OnCLayer] ManagedBootImages. Restore All MachineSet images [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_bootimages.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Medium-74240-[P2][OnCLayer] ManagedBootImages. Restore All MachineSet images [Disruptive]", func() {
var (
fakeImageName = "fake-coreos-bootimage-name"
coreosBootimagesCM = NewConfigMap(oc.AsAdmin(), MachineConfigNamespace, "coreos-bootimages")
machineSet = NewMachineSetList(oc.AsAdmin(), MachineAPINamespace).GetAllOrFail()[0]
clonedMSName = "cloned-tc-74240"
clonedWrongBootImageMSName = "cloned-tc-74240-wrong-boot-image"
clonedOwnedMSName = "cloned-tc-74240-owned"
platform = exutil.CheckPlatform(oc)
region = getCurrentRegionOrFail(oc)
)
exutil.By("Opt-in boot images update")
defer machineConfiguration.SetSpec(machineConfiguration.GetSpecOrFail())
o.Expect(
machineConfiguration.SetAllManagedBootImagesConfig(),
).To(o.Succeed(), "Error configuring ALL managedBootImages in the 'cluster' MachineConfiguration resource")
logger.Infof("OK!\n")
exutil.By("Clone first machineset")
clonedMS, err := machineSet.Duplicate(clonedMSName)
defer clonedMS.Delete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error duplicating %s", machineSet)
logger.Infof("OK!\n")
exutil.By("Clone first machineset but using a wrong ")
clonedWrongImageMS, err := DuplicateMachineSetWithCustomBootImage(machineSet, fakeImageName, clonedWrongBootImageMSName)
defer clonedWrongImageMS.Delete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error duplicating %s using a custom boot image", machineSet)
logger.Infof("OK!\n")
exutil.By("Clone first machineset and set an owner for the cloned machineset")
logger.Infof("Cloning machineset")
clonedOwnedMS, err := machineSet.Duplicate(clonedOwnedMSName)
defer clonedOwnedMS.Delete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error duplicating %s", machineSet)
logger.Infof("Setting a fake owner")
o.Expect(
clonedOwnedMS.Patch("merge", `{"metadata":{"ownerReferences": [{"apiVersion": "fake","blockOwnerDeletion": true,"controller": true,"kind": "fakekind","name": "master","uid": "fake-uuid"}]}}`),
).To(o.Succeed(), "Error patching %s with a fake owner", clonedOwnedMS)
logger.Infof("OK!\n")
exutil.By("All machinesets should use the right boot image")
for _, ms := range NewMachineSetList(oc.AsAdmin(), MachineAPINamespace).GetAllOrFail() {
logger.Infof("Checking boot image in machineset %s", ms.GetName())
currentCoreOsBootImage := getCoreOsBootImageFromConfigMapOrFail(platform, region, *machineSet.GetArchitectureOrFail(), coreosBootimagesCM)
logger.Infof("Current coreOsBootImage: %s", currentCoreOsBootImage)
o.Eventually(ms.GetCoreOsBootImage, "5m", "20s").Should(o.ContainSubstring(currentCoreOsBootImage),
"%s was NOT updated to use the right boot image", ms)
}
logger.Infof("OK!\n")
exutil.By("Patch cloned machinesets to use a wrong boot image")
o.Expect(clonedMS.SetCoreOsBootImage(fakeImageName)).To(o.Succeed(),
"Error setting a new boot image in %s", clonedMS)
o.Expect(clonedWrongImageMS.SetCoreOsBootImage(fakeImageName)).To(o.Succeed(),
"Error setting a new boot image in %s", clonedWrongImageMS)
o.Expect(clonedOwnedMS.SetCoreOsBootImage(fakeImageName)).To(o.Succeed(),
"Error setting a new boot image in %s", clonedOwnedMS)
logger.Infof("OK!\n")
exutil.By("All machinesets should use the right boot image except the one with an owner")
for _, ms := range NewMachineSetList(oc.AsAdmin(), MachineAPINamespace).GetAllOrFail() {
logger.Infof("Checking boot image in machineset %s", ms.GetName())
currentCoreOsBootImage := getCoreOsBootImageFromConfigMapOrFail(platform, region, *machineSet.GetArchitectureOrFail(), coreosBootimagesCM)
logger.Infof("Current coreOsBootImage: %s", currentCoreOsBootImage)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the currently configured coreos boot image")
if ms.GetName() == clonedOwnedMSName {
o.Consistently(ms.GetCoreOsBootImage, "15s", "5s").Should(o.Equal(fakeImageName),
"%s was patched and it is using the right boot image. Machinesets with owners should NOT be patched.", ms)
} else {
o.Eventually(ms.GetCoreOsBootImage, "5m", "20s").Should(o.ContainSubstring(currentCoreOsBootImage),
"%s was NOT updated to use the right boot image", ms)
o.Eventually(ms.GetUserDataSecret, "1m", "20s").Should(o.ContainSubstring("worker-user-data-managed"),
"%s was NOT updated to use the right user-data secret", ms)
}
}
logger.Infof("OK!\n")
exutil.By("Scale up one of the fixed machinesets to make sure that they are working fine")
logger.Infof("Scaling up machineset %s", clonedMS.GetName())
defer wMcp.waitForComplete()
defer clonedMS.ScaleTo(0)
o.Expect(clonedMS.ScaleTo(1)).To(o.Succeed(),
"Error scaling up MachineSet %s", clonedMS.GetName())
logger.Infof("Waiting %s machineset for being ready", clonedMS)
o.Eventually(clonedMS.GetIsReady, "20m", "2m").Should(o.BeTrue(), "MachineSet %s is not ready", clonedMS.GetName())
logger.Infof("OK!\n")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
783341b5-e244-4718-b977-93b3482e3e99
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Medium-74239-[OnCLayer] ManagedBootImages. Restore Partial MachineSet images [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_bootimages.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Medium-74239-[OnCLayer] ManagedBootImages. Restore Partial MachineSet images [Disruptive]", func() {
var (
fakeImageName = "fake-coreos-bootimage-name"
coreosBootimagesCM = NewConfigMap(oc.AsAdmin(), MachineConfigNamespace, "coreos-bootimages")
machineSet = NewMachineSetList(oc.AsAdmin(), MachineAPINamespace).GetAllOrFail()[0]
clonedMSLabelName = "cloned-tc-74239-label"
clonedMSNoLabelName = "cloned-tc-74239-no-label"
clonedMSLabelOwnedName = "cloned-tc-74239-label-owned"
labelName = "test"
labelValue = "update"
platform = exutil.CheckPlatform(oc)
region = getCurrentRegionOrFail(oc)
)
exutil.By("Opt-in boot images update")
defer machineConfiguration.SetSpec(machineConfiguration.GetSpecOrFail())
o.Expect(
machineConfiguration.SetPartialManagedBootImagesConfig(labelName, labelValue),
).To(o.Succeed(), "Error configuring Partial managedBootImages in the 'cluster' MachineConfiguration resource")
logger.Infof("OK!\n")
exutil.By("Clone the first machineset twice")
clonedMSLabel, err := machineSet.Duplicate(clonedMSLabelName)
defer clonedMSLabel.Delete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error duplicating %s", machineSet)
clonedMSNoLabel, err := machineSet.Duplicate(clonedMSNoLabelName)
defer clonedMSNoLabel.Delete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error duplicating %s", machineSet)
logger.Infof("OK!\n")
exutil.By("Clone first machineset again and set an owner for the cloned machineset")
logger.Infof("Cloning machineset")
clonedMSLabelOwned, err := machineSet.Duplicate(clonedMSLabelOwnedName)
defer clonedMSLabelOwned.Delete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error duplicating %s", machineSet)
logger.Infof("Setting a fake owner")
o.Expect(
clonedMSLabelOwned.Patch("merge", `{"metadata":{"ownerReferences": [{"apiVersion": "fake","blockOwnerDeletion": true,"controller": true,"kind": "fakekind","name": "master","uid": "fake-uuid"}]}}`),
).To(o.Succeed(), "Error patching %s with a fake owner", clonedMSLabelOwned)
logger.Infof("OK!\n")
exutil.By("Label one of the cloned images and the clonned image with the owner configuration")
o.Expect(clonedMSLabel.AddLabel(labelName, labelValue)).To(o.Succeed(),
"Error labeling %s", clonedMSLabel)
o.Expect(clonedMSLabelOwned.AddLabel(labelName, labelValue)).To(o.Succeed(),
"Error labeling %s", clonedMSLabel)
logger.Infof("OK!\n")
exutil.By("Patch the clonned machineset to configure a new boot image")
o.Expect(clonedMSLabel.SetCoreOsBootImage(fakeImageName)).To(o.Succeed(),
"Error setting a new boot image in %s", clonedMSLabel)
o.Expect(clonedMSNoLabel.SetCoreOsBootImage(fakeImageName)).To(o.Succeed(),
"Error setting a new boot image in %s", clonedMSNoLabel)
o.Expect(clonedMSLabelOwned.SetCoreOsBootImage(fakeImageName)).To(o.Succeed(),
"Error setting a new boot image in %s", clonedMSLabelOwned)
logger.Infof("OK!\n")
exutil.By("The labeled machineset without owner should be updated")
currentCoreOsBootImage := getCoreOsBootImageFromConfigMapOrFail(platform, region, *machineSet.GetArchitectureOrFail(), coreosBootimagesCM)
logger.Infof("Current coreOsBootImage: %s", currentCoreOsBootImage)
o.Eventually(clonedMSLabel.GetCoreOsBootImage, "5m", "20s").Should(o.ContainSubstring(currentCoreOsBootImage),
"%s was NOT updated to use the right boot image", clonedMSLabel)
o.Eventually(clonedMSLabel.GetUserDataSecret, "1m", "20s").Should(o.ContainSubstring("worker-user-data-managed"),
"%s was NOT updated to use the right user-data secret", clonedMSLabel)
logger.Infof("OK!\n")
exutil.By("The labeled machineset with owner should NOT be updated")
o.Consistently(clonedMSLabelOwned.GetCoreOsBootImage, "15s", "5s").Should(o.Equal(fakeImageName),
"%s was patched and it is using the right boot image. Machinesets with owners should NOT be patched.", clonedMSLabelOwned)
logger.Infof("OK!\n")
exutil.By("The machineset without label should NOT be updated")
o.Consistently(clonedMSNoLabel.GetCoreOsBootImage, "15s", "5s").Should(o.Equal(fakeImageName),
"%s was patched and it is using the right boot image. Machinesets with owners should NOT be patched.", clonedMSNoLabel)
logger.Infof("OK!\n")
exutil.By("Scale up the fixed machinessetset to make sure that it is working fine")
logger.Infof("Scaling up machineset %s", clonedMSLabel.GetName())
defer wMcp.waitForComplete()
defer clonedMSLabel.ScaleTo(0)
o.Expect(clonedMSLabel.ScaleTo(1)).To(o.Succeed(),
"Error scaling up MachineSet %s", clonedMSLabel.GetName())
logger.Infof("Waiting %s machineset for being ready", clonedMSLabel)
o.Eventually(clonedMSLabel.GetIsReady, "20m", "2m").Should(o.BeTrue(), "MachineSet %s is not ready", clonedMSLabel.GetName())
logger.Infof("OK!\n")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
9102199e-c689-4177-982d-af908015ebce
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Medium-74764-[P1][OnCLayer] ManagedBootImages. Delete machineset when error [Disruptive]
|
['"regexp"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_bootimages.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Medium-74764-[P1][OnCLayer] ManagedBootImages. Delete machineset when error [Disruptive]", func() {
var (
machineConfiguration = GetMachineConfiguration(oc.AsAdmin())
machineSet = NewMachineSetList(oc.AsAdmin(), MachineAPINamespace).GetAllOrFail()[0]
clonedMSName = "cloned-tc-74764-copy"
labelName = "test"
labelValue = "update"
expectedFailedMessageRegexp = regexp.QuoteMeta("Error(s): error syncing MAPI MachineSet " +
clonedMSName +
": unexpected OwnerReference: fakekind/master. Please remove this machineset from boot image management to avoid errors")
expectedFailedProgressMessage = "Reconciled 0 of 1 MAPI MachineSets | Reconciled 0 of 0 CAPI MachineSets | Reconciled 0 of 0 CAPI MachineDeployments"
expectedOKMessage = "0 Degraded MAPI MachineSets | 0 Degraded CAPI MachineSets | 0 CAPI MachineDeployments"
)
exutil.By("Opt-in boot images update")
defer machineConfiguration.SetSpec(machineConfiguration.GetSpecOrFail())
o.Expect(
machineConfiguration.SetPartialManagedBootImagesConfig(labelName, labelValue),
).To(o.Succeed(), "Error configuring Partial managedBootImages in the 'cluster' MachineConfiguration resource")
logger.Infof("OK!\n")
exutil.By("Clone the first machineset")
clonedMS, err := machineSet.Duplicate(clonedMSName)
o.Expect(err).NotTo(o.HaveOccurred(), "Error duplicating %s", machineSet)
defer clonedMS.Delete()
logger.Infof("OK!\n")
exutil.By("Add a fake owner to the new cloned machineset")
o.Expect(
clonedMS.Patch("merge", `{"metadata":{"ownerReferences": [{"apiVersion": "fake","blockOwnerDeletion": true,"controller": true,"kind": "fakekind","name": "master","uid": "fake-uuid"}]}}`),
).To(o.Succeed(), "Error patching %s with a fake owner", clonedMS)
logger.Infof("OK!\n")
exutil.By("Label the cloned machineset so that its boot image is updated by MCO")
o.Expect(clonedMS.AddLabel(labelName, labelValue)).To(o.Succeed(),
"Error labeling %s", clonedMS)
logger.Infof("OK!\n")
exutil.By("Check that an error is reported in the machineconfiguration resource")
o.Eventually(machineConfiguration, "5m", "10s").Should(HaveConditionField("BootImageUpdateDegraded", "status", "True"),
"Expected %s to be BootImageUpdateDegraded.\n%s", machineConfiguration.PrettyString())
o.Eventually(machineConfiguration, "5m", "10s").Should(HaveConditionField("BootImageUpdateDegraded", "message", o.MatchRegexp(expectedFailedMessageRegexp)),
"Expected %s to be BootImageUpdateDegraded.\n%s", machineConfiguration.PrettyString())
logger.Infof("OK!\n")
exutil.By("Check reported progress in machineconfiguration resource")
o.Eventually(machineConfiguration, "5m", "20s").Should(HaveConditionField("BootImageUpdateProgressing", "message", expectedFailedProgressMessage),
"Progress message is not the expected one.\n%s", machineConfiguration.PrettyString())
o.Eventually(machineConfiguration, "5m", "20s").Should(HaveConditionField("BootImageUpdateProgressing", "status", "False"),
"Progress status is not the expected one.\n%s", machineConfiguration.PrettyString())
logger.Infof("OK!\n")
exutil.By("Delete the new cloned machineset")
o.Expect(clonedMS.Delete()).To(o.Succeed(), "Error deleting %s", clonedMS)
logger.Infof("OK!\n")
exutil.By("Check the machineconfiguration resource is not reporting errors anymore and the progress is OK")
o.Eventually(machineConfiguration, "5m", "20s").Should(HaveConditionField("BootImageUpdateDegraded", "status", "False"),
"Expected %s to be BootImageUpdateDegraded.\n%s", machineConfiguration.PrettyString())
o.Eventually(machineConfiguration, "5m", "20s").Should(HaveConditionField("BootImageUpdateDegraded", "message", expectedOKMessage),
"Expected %s to be BootImageUpdateDegraded.\n%s", machineConfiguration.PrettyString())
o.Eventually(machineConfiguration, "5m", "20s").Should(HaveConditionField("BootImageUpdateProgressing", "status", "False"),
"Progress status is not the expected one.\n%s", machineConfiguration.PrettyString())
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
1c05107b-c1b3-4d18-aca6-2269f0497933
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Medium-74751-[P2][OnCLayer] ManagedBootImages. Fix errors [Disruptive]
|
['"regexp"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_bootimages.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Medium-74751-[P2][OnCLayer] ManagedBootImages. Fix errors [Disruptive]", func() {
var (
coreosBootimagesCM = NewConfigMap(oc.AsAdmin(), MachineConfigNamespace, "coreos-bootimages")
machineConfiguration = GetMachineConfiguration(oc.AsAdmin())
machineSet = NewMachineSetList(oc.AsAdmin(), MachineAPINamespace).GetAllOrFail()[0]
clonedMSName = "cloned-tc-74751-copy"
labelName = "test"
labelValue = "update"
fakearch = "fake-arch"
expectedFailedMessageRegexp = regexp.QuoteMeta("Error(s): error syncing MAPI MachineSet " +
clonedMSName +
": failed to fetch arch during machineset sync: invalid architecture value found in annotation: kubernetes.io/arch=" + fakearch)
platform = exutil.CheckPlatform(oc)
region = getCurrentRegionOrFail(oc)
)
exutil.By("Opt-in boot images update")
defer machineConfiguration.SetSpec(machineConfiguration.GetSpecOrFail())
o.Expect(
machineConfiguration.SetPartialManagedBootImagesConfig(labelName, labelValue),
).To(o.Succeed(), "Error configuring Partial managedBootImages in the 'cluster' MachineConfiguration resource")
logger.Infof("OK!\n")
exutil.By("Clone the first machineset")
clonedMS, err := machineSet.Duplicate(clonedMSName)
o.Expect(err).NotTo(o.HaveOccurred(), "Error duplicating %s", machineSet)
defer clonedMS.Delete()
logger.Infof("OK!\n")
exutil.By("Set a wrong architecture in the cloned image")
o.Expect(clonedMS.SetArchitecture(fakearch)).To(o.Succeed(), "Error setting a fake architecture in %s", clonedMS)
logger.Infof("OK!\n")
exutil.By("Set a wrong boot image in the cloned image")
o.Expect(clonedMS.SetCoreOsBootImage("fake-image")).To(o.Succeed(), "Error setting a fake boot image in %s", clonedMS)
logger.Infof("OK!\n")
exutil.By("Check that no failures are being reported")
o.Eventually(machineConfiguration, "5m", "20s").Should(HaveConditionField("BootImageUpdateDegraded", "status", "False"),
"Expected %s not to be BootImageUpdateDegraded.\n%s", machineConfiguration.PrettyString())
o.Eventually(machineConfiguration, "5m", "20s").Should(HaveConditionField("BootImageUpdateProgressing", "status", "False"),
"Expected %s not to be BootImageUpdateDegraded.\n%s", machineConfiguration.PrettyString())
logger.Infof("OK!\n")
exutil.By("Label the cloned machineset so that its boot image is updated by MCO")
o.Expect(clonedMS.AddLabel(labelName, labelValue)).To(o.Succeed(),
"Error labeling %s", clonedMS)
logger.Infof("OK!\n")
exutil.By("Check that an error is reported in the machineconfiguration resource and that there is no progress")
o.Eventually(machineConfiguration, "5m", "20s").Should(HaveConditionField("BootImageUpdateDegraded", "status", "True"),
"Expected %s to be BootImageUpdateDegraded.\n%s", machineConfiguration.PrettyString())
o.Eventually(machineConfiguration, "5m", "20s").Should(HaveConditionField("BootImageUpdateDegraded", "message", o.MatchRegexp(expectedFailedMessageRegexp)),
"Expected %s to be BootImageUpdateDegraded.\n%s", machineConfiguration.PrettyString())
o.Eventually(machineConfiguration, "5m", "20s").Should(HaveConditionField("BootImageUpdateProgressing", "status", "False"),
"Progress status is not the expected one.\n%s", machineConfiguration.PrettyString())
// since it will be in "progressing" status for a very short time, we can't poll the value. We need to use the lastTransitionTime field instead
lastProgressTransition := machineConfiguration.GetOrFail(`{.status.conditions[?(@.type=="BootImageUpdateProgressing")].lastTransitionTime}`)
logger.Infof("OK!\n")
exutil.By("Set the right architecture in the cloneed machineset")
o.Expect(clonedMS.SetArchitecture(machineSet.GetArchitectureOrFail().String())).To(o.Succeed(), "Error fixing the problem in the architecture in %s", clonedMS)
logger.Infof("OK!\n")
exutil.By("Check that no error is reported anymore in the machineconfiguration resource and the progress was OK")
// We need to poll frequently since it will be in "progressing" status for a very short time
o.Eventually(machineConfiguration, "20s", "1s").ShouldNot(HaveConditionField("BootImageUpdateProgressing", "lastTransitionTime", lastProgressTransition),
"Progress status did not change, but it should have been moved to 'true' and back to 'false' .\n%s", machineConfiguration.PrettyString())
o.Eventually(machineConfiguration, "2m", "10s").Should(HaveConditionField("BootImageUpdateProgressing", "status", "False"),
"Progress status is not the expected one.\n%s", machineConfiguration.PrettyString())
o.Eventually(machineConfiguration, "5m", "20s").Should(HaveConditionField("BootImageUpdateDegraded", "status", "False"),
"Expected %s to be BootImageUpdateDegraded.\n%s", machineConfiguration.PrettyString())
logger.Infof("OK!\n")
exutil.By("Check that the boot image was updated")
currentCoreOsBootImage := getCoreOsBootImageFromConfigMapOrFail(platform, region, *machineSet.GetArchitectureOrFail(), coreosBootimagesCM)
logger.Infof("Current coreOsBootImage: %s", currentCoreOsBootImage)
o.Eventually(clonedMS.GetCoreOsBootImage, "5m", "20s").Should(o.ContainSubstring(currentCoreOsBootImage),
"%s was NOT updated to use the right boot image", clonedMS)
logger.Infof("OK!\n")
})
| |||||
test
|
openshift/openshift-tests-private
|
88ac6196-a021-457a-8cb4-5b987964234f
|
mco_hypershift
|
import (
"fmt"
"os"
"path/filepath"
"strings"
"time"
"github.com/openshift/openshift-tests-private/test/extended/util/architecture"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_hypershift.go
|
package mco
import (
"fmt"
"os"
"path/filepath"
"strings"
"time"
"github.com/openshift/openshift-tests-private/test/extended/util/architecture"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)
var _ = g.Describe("[sig-mco] MCO hypershift", func() {
defer g.GinkgoRecover()
var (
// init cli object, temp namespace contains prefix mco.
// tip: don't put this in BeforeEach/JustBeforeEach, you will get error
// "You may only call AfterEach from within a Describe, Context or When"
oc = exutil.NewCLIForKubeOpenShift("mco-hypershift")
// temp dir to store all test files, and it will be recycled when test is finished
tmpdir string
// whether hypershift is enabled
hypershiftEnabled bool
// declare hypershift test driver
ht *HypershiftTest
)
g.JustBeforeEach(func() {
// check supported platform for this test. only aws is supported
skipTestIfSupportedPlatformNotMatched(oc, "aws")
preChecks(oc)
tmpdir = createTmpDir()
hypershiftEnabled = isHypershiftEnabled(oc)
ht = &HypershiftTest{
NewSharedContext(),
oc,
HypershiftCli{},
GetCloudCredential(oc),
tmpdir,
exutil.GetHyperShiftHostedClusterNameSpace(oc),
}
// in hypershift enabled env, like prow or cluster installed with hypershift template
// operator and hosted cluster are available by default
// skip operator and hosted cluster install steps
if !hypershiftEnabled {
// create/recycle aws s3 bucket
ht.CreateBucket()
// install hypershift
ht.InstallOnAws()
// create hosted cluster w/o node pool
ht.CreateClusterOnAws()
} else {
hostedClusterName := getFirstHostedCluster(oc)
hostedClusterNs := exutil.GetHyperShiftHostedClusterNameSpace(oc)
// OCPQE-16036 check hosted cluster platform type, we only support create nodepool on aws based hostedcluster.
hostedClusterPlatform, err := exutil.GetHostedClusterPlatformType(oc, hostedClusterName, hostedClusterNs)
o.Expect(err).NotTo(o.HaveOccurred(), "Get hostedcluster platform type failed")
logger.Debugf("hostedcluster platform type is %s", hostedClusterPlatform)
if hostedClusterPlatform != exutil.AWSPlatform {
g.Skip(fmt.Sprintf("hostedcluster platform type [%s] is not aws, skip this test", hostedClusterPlatform))
}
ht.Put(TestCtxKeyCluster, hostedClusterName)
}
})
g.JustAfterEach(func() {
// only do clean up (destroy hostedcluster/uninstall hypershift/delete bucket) for envs where hypershift is not pre-installed
if !hypershiftEnabled {
ht.DestroyClusterOnAws()
ht.Uninstall()
ht.DeleteBucket()
}
os.RemoveAll(tmpdir)
logger.Infof("test dir %s is cleaned up", tmpdir)
})
g.It("Author:rioliu-Longduration-HyperShiftMGMT-NonPreRelease-High-54328-hypershift Add new file on hosted cluster node via config map [Disruptive]", func() {
// create node pool with replica=2
// destroy node pool then delete config map
defer ht.DeleteMcConfigMap()
defer ht.DestroyNodePoolOnAws()
ht.CreateNodePoolOnAws("2")
// create config map which contains machine config
ht.CreateMcConfigMap()
// patch node pool to update config name with new config map
ht.PatchNodePoolToTriggerUpdate()
// create kubeconfig for hosted cluster
ht.CreateKubeConfigForCluster()
// check machine config annotations on nodes to make sure update is done
ht.CheckMcAnnotationsOnNode()
// check file content on hosted cluster nodes
ht.VerifyFileContent()
})
g.It("Author:rioliu-Longduration-HyperShiftMGMT-NonPreRelease-High-54366-hypershift Update release image of node pool [Disruptive]", func() {
// check arch, only support amd64
architecture.SkipNonAmd64SingleArch(oc)
// check latest accepted build, if it is same as hostedcluster version, skip this case
ht.skipTestIfLatestAcceptedBuildIsSameAsHostedClusterVersion()
// create a nodepool with 2 replicas and enable in place upgrade
defer ht.DestroyNodePoolOnAws()
ht.CreateNodePoolOnAws("2")
// patch nodepool with latest nightly build and wait until version update to complete
// compare nodepool version and build version. they should be same
ht.PatchNodePoolToUpdateReleaseImage()
// create kubeconfig for hosted cluster
ht.CreateKubeConfigForCluster()
// check machine config annotations on nodes to make sure update is done
ht.CheckMcAnnotationsOnNode()
})
g.It("Author:rioliu-Longduration-HyperShiftMGMT-NonPreRelease-High-55356-[P1] hypershift Honor MaxUnavailable for inplace upgrades [Disruptive]", func() {
// create node pool with replica=3
// destroy node pool then delete config map
defer ht.DeleteMcConfigMap()
defer ht.DestroyNodePoolOnAws()
ht.CreateNodePoolOnAws("3") // TODO: change the replica to 5 when bug https://issues.redhat.com/browse/OCPBUGS-2870 is fixed
// create config map which contains machine config
ht.CreateMcConfigMap()
// patch node pool to update config name with new config map
ht.PatchNodePoolToUpdateMaxUnavailable("2")
// create kubeconfig for hosted cluster
ht.CreateKubeConfigForCluster()
// check whether nodes are updating in parallel
ht.CheckNodesAreUpdatingInParallel(2)
})
})
// GetCloudCredential get cloud credential impl by platform name
func GetCloudCredential(oc *exutil.CLI) CloudCredential {
var (
cc CloudCredential
ce error
)
platform := exutil.CheckPlatform(oc)
switch platform {
case "aws":
cc, ce = NewAwsCredential(oc, "default")
o.Expect(ce).NotTo(o.HaveOccurred(), "extract aws cred from cluster failed")
default:
logger.Infof("no impl of CloudCredential for platform %s right now", platform)
}
return cc
}
// HypershiftTest tester for hypershift, contains required tool e.g client, cli, cred, shared context etc.
type HypershiftTest struct {
*SharedContext
oc *exutil.CLI
cli HypershiftCli
cred CloudCredential
dir, clusterNS string
}
// InstallOnAws install hypershift on aws
func (ht *HypershiftTest) InstallOnAws() {
exutil.By("install hypershift operator")
awscred := ht.cred.(*AwsCredential)
_, installErr := ht.cli.Install(
NewAwsInstallOptions().
WithBucket(ht.StrValue(TestCtxKeyBucket)).
WithCredential(awscred.file).
WithRegion(awscred.region).
WithEnableDefaultingWebhook().
WithHypershiftImage(ht.getHypershiftImage()))
o.Expect(installErr).NotTo(o.HaveOccurred(), "install hypershift operator via cli failed")
// check whether pod under ns hypershift is running
exutil.AssertAllPodsToBeReadyWithPollerParams(ht.oc, "hypershift", 20*time.Second, 5*time.Minute)
logger.Infof("hypershift is installed on AWS successfully")
}
// Uninstall uninstall hypershift
func (ht *HypershiftTest) Uninstall() {
ht.cli.Uninstall()
}
// CreateBucket create s3 bucket
func (ht *HypershiftTest) CreateBucket() {
exutil.By("configure aws-cred file with default profile")
const (
bucketPolicyTemplate = `{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": "*",
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::%s/*"
}
]
}`
)
// create a temp file to store aws credential in shared temp dir
credfile := generateTempFilePath(ht.dir, "aws-cred-*.conf")
// call CloudCredential#OutputToFile to write cred info to temp file
o.Expect(ht.cred.OutputToFile(credfile)).NotTo(o.HaveOccurred(), "write aws cred to file failed")
exutil.By("create s3 bucket for installer")
// get aws cred
awscred := ht.cred.(*AwsCredential)
// get infra name as part of bucket name
infraName := NewResource(ht.oc.AsAdmin(), "infrastructure", "cluster").GetOrFail("{.status.infrastructureName}")
// bucket name pattern: $infraName-$component-$region-$randstr e.g. rioliu-092301-mvw2f-hypershift-us-east-2-glnjmsex
bucket := fmt.Sprintf("%s-hypershift-%s-%s", infraName, awscred.region, exutil.GetRandomString())
ht.Put(TestCtxKeyBucket, bucket)
// init s3 client
s3 := exutil.NewS3ClientFromCredFile(awscred.file, "default", awscred.region)
// create the bucket if it does not exist
o.Expect(s3.CreateBucket(bucket)).NotTo(o.HaveOccurred(), "create aws s3 bucket %s failed", bucket)
policy := fmt.Sprintf(bucketPolicyTemplate, bucket)
o.Expect(s3.PutBucketPolicy(bucket, policy)).To(o.Succeed(), "an error happened while adding a policy to the bucket")
}
// DeleteBucket delete s3 bucket
func (ht *HypershiftTest) DeleteBucket() {
exutil.By("delete s3 bucket to recycle cloud resource")
// get aws cred
awscred := ht.cred.(*AwsCredential)
// init s3 client
s3 := exutil.NewS3ClientFromCredFile(awscred.file, "default", awscred.region)
// delete bucket, ignore not found
bucketName := ht.StrValue(TestCtxKeyBucket)
o.Expect(s3.DeleteBucket(bucketName)).NotTo(o.HaveOccurred(), "delete aws s3 bucket %s failed", bucketName)
}
// CreateClusterOnAws create hosted cluster on aws
func (ht *HypershiftTest) CreateClusterOnAws() {
exutil.By("extract pull-secret from namespace openshift-config")
// extract pull secret and save it to temp dir
secret := NewSecret(ht.oc.AsAdmin(), "openshift-config", "pull-secret")
o.Expect(
secret.ExtractToDir(ht.dir)).
NotTo(o.HaveOccurred(),
fmt.Sprintf("extract pull-secret from openshift-config to %s failed", ht.dir))
secretFile := filepath.Join(ht.dir, ".dockerconfigjson")
logger.Infof("pull-secret info is saved to %s", secretFile)
exutil.By("get base domain from resource dns/cluster")
baseDomain := getBaseDomain(ht.oc)
logger.Infof("based domain is: %s", baseDomain)
exutil.By("create hosted cluster on AWS")
name := fmt.Sprintf("mco-cluster-%s", exutil.GetRandomString())
ht.Put(TestCtxKeyCluster, name)
awscred := ht.cred.(*AwsCredential)
// We will use the same release image that the host cluster is using
releaseImage, err := GetClusterDesiredReleaseImage(ht.oc)
o.Expect(err).NotTo(o.HaveOccurred())
logger.Infof("Using release image: %s", releaseImage)
createClusterOpts := NewAwsCreateClusterOptions().
WithAwsCredential(awscred.file).
WithBaseDomain(baseDomain).
WithPullSecret(secretFile).
WithRegion(awscred.region).
WithReleaseImage(releaseImage).
WithName(name)
arch := architecture.ClusterArchitecture(ht.oc)
if arch == architecture.ARM64 {
createClusterOpts = createClusterOpts.WithArch("arm64")
}
_, createClusterErr := ht.cli.CreateCluster(createClusterOpts)
o.Expect(createClusterErr).NotTo(o.HaveOccurred(), "create hosted cluster on aws failed")
ht.clusterNS = exutil.GetHyperShiftHostedClusterNameSpace(ht.oc)
logger.Infof("the hosted cluster namespace is: %s", ht.clusterNS)
// wait until the hosted control plane is available
defer ht.oc.AsAdmin().Run("get").Args("-n", fmt.Sprintf("%s-%s", ht.clusterNS, name), "pods").Execute() // for debugging purpose
exutil.AssertAllPodsToBeReadyWithPollerParams(ht.oc, fmt.Sprintf("%s-%s", ht.clusterNS, name), 30*time.Second, 10*time.Minute)
logger.Infof("hosted cluster %s is created successfully on AWS", name)
}
// DestroyClusterOnAws destroy hosted cluster on aws
func (ht *HypershiftTest) DestroyClusterOnAws() {
clusterName := ht.StrValue(TestCtxKeyCluster)
exutil.By(fmt.Sprintf("destroy hosted cluster %s", clusterName))
awscred := ht.cred.(*AwsCredential)
destroyClusterOpts := NewAwsDestroyClusterOptions().
WithName(clusterName).
WithAwsCredential(awscred.file).
WithDestroyCloudResource()
_, destroyClusterErr := ht.cli.DestroyCluster(destroyClusterOpts)
o.Expect(destroyClusterErr).NotTo(o.HaveOccurred(), fmt.Sprintf("destroy hosted cluster %s failed", clusterName))
logger.Infof(fmt.Sprintf("hosted cluster %s is destroyed successfully", clusterName))
}
// CreateNodePoolOnAws create node pool on aws
// param: replica nodes # in node pool
func (ht *HypershiftTest) CreateNodePoolOnAws(replica string) {
exutil.By("create rendered node pool")
clusterName := ht.StrValue(TestCtxKeyCluster)
name := fmt.Sprintf("%s-np-%s", clusterName, exutil.GetRandomString())
renderNodePoolOpts := NewAwsCreateNodePoolOptions().
WithName(name).
WithClusterName(clusterName).
WithNodeCount(replica).
WithNamespace(ht.clusterNS).
WithRender()
arch := architecture.ClusterArchitecture(ht.oc)
if arch == architecture.ARM64 {
renderNodePoolOpts = renderNodePoolOpts.WithArch("arm64")
}
renderedNp, renderNpErr := ht.cli.CreateNodePool(renderNodePoolOpts)
o.Expect(renderNpErr).NotTo(o.HaveOccurred(), fmt.Sprintf("create node pool %s failed", name))
o.Expect(renderedNp).NotTo(o.BeEmpty(), "rendered nodepool is empty")
// replace upgradeType to InPlace
renderedNp = strings.ReplaceAll(renderedNp, "Replace", "InPlace")
logger.Infof("change upgrade type from Replace to InPlace in rendered node pool")
// write rendered node pool to temp file
renderedFile := filepath.Join(ht.dir, fmt.Sprintf("%s-%s.yaml", name, exutil.GetRandomString()))
o.Expect(
os.WriteFile(renderedFile, []byte(renderedNp), 0o600)).
NotTo(o.HaveOccurred(), fmt.Sprintf("write rendered node pool to %s failed", renderedFile))
logger.Infof("rendered node pool is saved to file %s", renderedFile)
// apply updated node pool file
o.Expect(
ht.oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", renderedFile).Execute()).
NotTo(o.HaveOccurred(), "create rendered node pool failed")
logger.Infof("poll node pool status, expected is desired nodes == current nodes")
np := NewHypershiftNodePool(ht.oc.AsAdmin(), ht.clusterNS, name)
logger.Debugf(np.PrettyString())
// poll node pool state, expected is desired nodes == current nodes
np.WaitUntilReady()
ht.Put(TestCtxKeyNodePool, name)
}
// DestroyNodePoolOnAws delete node pool related awsmachine first, then delete node pool
func (ht *HypershiftTest) DestroyNodePoolOnAws() {
exutil.By("destroy nodepool related resources")
logger.Infof("delete node pool related machines")
npName := ht.StrValue(TestCtxKeyNodePool)
clusterName := ht.StrValue(TestCtxKeyCluster)
awsMachines, getAwsMachineErr := NewNamespacedResourceList(ht.oc.AsAdmin(), HypershiftAwsMachine, fmt.Sprintf("%s-%s", ht.clusterNS, clusterName)).GetAll()
o.Expect(getAwsMachineErr).NotTo(o.HaveOccurred(), "get awsmachines failed for hosted cluster %s", clusterName)
o.Expect(awsMachines).ShouldNot(o.BeEmpty())
for _, machine := range awsMachines {
clonedFromName := machine.GetAnnotationOrFail(`cluster.x-k8s.io/cloned-from-name`)
if clonedFromName == npName {
logger.Infof("deleting awsmachine %s", machine.GetName())
deleteMachineErr := machine.Delete()
if deleteMachineErr != nil {
// here we just log the error and do not terminate the clean-up process.
// if any deletion fails, the resource will be recycled by hypershift
logger.Errorf("delete awsmachine %s failed\n %v", machine.GetName(), deleteMachineErr)
} else {
logger.Infof("awsmachine %s is deleted successfully", machine.GetName())
}
}
}
logger.Infof("all the awsmachines of nodepool %s are deleted", npName)
NewNamespacedResource(ht.oc.AsAdmin(), HypershiftCrNodePool, ht.clusterNS, npName).DeleteOrFail()
logger.Infof("nodepool %s is deleted successfully", npName)
}
// CreateMcConfigMap create config map contains machine config
func (ht *HypershiftTest) CreateMcConfigMap() {
exutil.By("create machine config in config map")
template := generateTemplateAbsolutePath(TmplHypershiftMcConfigMap)
cmName := fmt.Sprintf("mc-cm-%s", exutil.GetRandomString())
mcName := fmt.Sprintf("99-mc-test-%s", exutil.GetRandomString())
mcpName := MachineConfigPoolWorker
filePath := fmt.Sprintf("/home/core/test-%s", exutil.GetRandomString())
exutil.ApplyNsResourceFromTemplate(
ht.oc.AsAdmin(),
ht.clusterNS,
"--ignore-unknown-parameters=true",
"-f", template,
"-p",
"CMNAME="+cmName,
"MCNAME="+mcName,
"POOL="+mcpName,
"FILEPATH="+filePath,
)
// get config map to check it exists or not
cm := NewNamespacedResource(ht.oc.AsAdmin(), "cm", ht.clusterNS, cmName)
o.Expect(cm.Exists()).Should(o.BeTrue(), "mc config map does not exist")
logger.Debugf(cm.PrettyString())
logger.Infof("config map %s is created successfully", cmName)
ht.Put(TestCtxKeyConfigMap, cmName)
ht.Put(TestCtxKeyFilePath, filePath)
}
// DeleteMcConfigMap when node pool is destroyed, delete config map
func (ht *HypershiftTest) DeleteMcConfigMap() {
exutil.By("delete config map")
cmName := ht.StrValue(TestCtxKeyConfigMap)
NewNamespacedResource(ht.oc.AsAdmin(), "cm", ht.clusterNS, cmName).DeleteOrFail()
logger.Infof("config map %s is deleted successfully", cmName)
}
// PatchNodePoolToTriggerUpdate patch node pool to update config map
// this operation will trigger in-place update
func (ht *HypershiftTest) PatchNodePoolToTriggerUpdate() {
exutil.By("patch node pool to add config setting")
npName := ht.StrValue(HypershiftCrNodePool)
cmName := ht.StrValue(TestCtxKeyConfigMap)
np := NewHypershiftNodePool(ht.oc.AsAdmin(), ht.clusterNS, npName)
o.Expect(np.Patch("merge", fmt.Sprintf(`{"spec":{"config":[{"name": "%s"}]}}`, cmName))).NotTo(o.HaveOccurred(), "patch node pool with cm setting failed")
o.Expect(np.GetOrFail(`{.spec.config}`)).Should(o.ContainSubstring(cmName), "node pool does not have cm config")
logger.Debugf(np.PrettyString())
exutil.By("wait node pool update to complete")
np.WaitUntilConfigIsUpdating()
np.WaitUntilConfigUpdateIsCompleted()
}
// PatchNodePoolToUpdateReleaseImage patch node pool to update spec.release.image
// this operation will update os image on hosted cluster nodes
func (ht *HypershiftTest) PatchNodePoolToUpdateReleaseImage() {
exutil.By("patch node pool to update release image")
npName := ht.StrValue(HypershiftCrNodePool)
np := NewHypershiftNodePool(ht.oc.AsAdmin(), ht.clusterNS, npName)
versionSlice := strings.Split(np.GetVersion(), ".")
imageURL, version := getLatestImageURL(ht.oc, fmt.Sprintf("%s.%s", versionSlice[0], versionSlice[1])) // get latest nightly build based on release version
o.Expect(np.Patch("merge", fmt.Sprintf(`{"spec":{"release":{"image": "%s"}}}`, imageURL))).NotTo(o.HaveOccurred(), "patch node pool with release image failed")
o.Expect(np.GetOrFail(`{.spec.release.image}`)).Should(o.ContainSubstring(imageURL), "node pool does not have update release image config")
logger.Debugf(np.PrettyString())
exutil.By("wait node pool update to complete")
np.WaitUntilVersionIsUpdating()
np.WaitUntilVersionUpdateIsCompleted()
o.Expect(np.GetVersion()).Should(o.Equal(version), "version of node pool is not updated correctly")
}
// PatchNodePoolToUpdateMaxUnavailable update node pool to enable maxUnavailable support
func (ht *HypershiftTest) PatchNodePoolToUpdateMaxUnavailable(maxUnavailable string) {
exutil.By("patch node pool to update property spec.management.inPlace.maxUnavailable and spec.config")
npName := ht.StrValue(HypershiftCrNodePool)
cmName := ht.StrValue(TestCtxKeyConfigMap)
// update maxUnavailable
np := NewHypershiftNodePool(ht.oc.AsAdmin(), ht.clusterNS, npName)
o.Expect(np.Patch("merge", fmt.Sprintf(`{"spec":{"management":{"inPlace":{"maxUnavailable":%s}}}}`, maxUnavailable))).NotTo(o.HaveOccurred(), "patch node pool with maxUnavailable setting failed")
o.Expect(np.GetOrFail(`{.spec.management.inPlace}`)).Should(o.ContainSubstring("maxUnavailable"), "node pool does not have maxUnavailable config")
// update config
o.Expect(np.Patch("merge", fmt.Sprintf(`{"spec":{"config":[{"name": "%s"}]}}`, cmName))).NotTo(o.HaveOccurred(), "patch node pool with cm setting failed")
o.Expect(np.GetOrFail(`{.spec.config}`)).Should(o.ContainSubstring(cmName), "node pool does not have cm config")
logger.Debugf(np.PrettyString())
exutil.By("check node pool update is started")
np.WaitUntilConfigIsUpdating()
}
// CheckNodesAreUpdatingInParallel check whether nodes are updating in parallel, nodeNum should be equal to maxUnavailable setting
func (ht *HypershiftTest) CheckNodesAreUpdatingInParallel(nodeNum int) {
npName := ht.StrValue(HypershiftCrNodePool)
np := NewHypershiftNodePool(ht.oc.AsAdmin(), ht.clusterNS, npName)
defer np.WaitUntilConfigUpdateIsCompleted()
exutil.By(fmt.Sprintf("checking whether nodes are updating in parallel, expected node num is %v", nodeNum))
kubeconf := ht.StrValue(TestCtxKeyKubeConfig)
ht.oc.SetGuestKubeconf(kubeconf)
nodesInfo := ""
mcStatePoller := func() int {
nodeStates := NewNodeList(ht.oc.AsAdmin().AsGuestKubeconf()).McStateSnapshot()
logger.Infof("machine-config state of all hosted cluster nodes: %s", nodeStates)
nodesInfo, _ = ht.oc.AsAdmin().AsGuestKubeconf().Run("get").Args("node").Output()
return strings.Count(nodeStates, "Working")
}
o.Eventually(mcStatePoller, "3m", "10s").Should(o.BeNumerically("==", nodeNum), "updating node num not equal to maxUnavailable value.\n%s", nodesInfo)
o.Consistently(mcStatePoller, "8m", "10s").Should(o.BeNumerically("<=", nodeNum), "updating node num is greater than maxUnavailable value.\n%s", nodesInfo)
}
// CreateKubeConfigForCluster create kubeconfig for hosted cluster
func (ht *HypershiftTest) CreateKubeConfigForCluster() {
exutil.By("create kubeconfig for hosted cluster")
clusterName := ht.StrValue(TestCtxKeyCluster)
file := filepath.Join(ht.dir, fmt.Sprintf("%s-kubeconfig", clusterName))
_, err := ht.cli.CreateKubeConfig(clusterName, ht.clusterNS, file)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("create kubeconfig for cluster %s failed", clusterName))
logger.Infof("kubeconfig of cluster %s is saved to %s", clusterName, file)
ht.Put(TestCtxKeyKubeConfig, file)
}
// CheckMcAnnotationsOnNode check machine config is updated successfully
func (ht *HypershiftTest) CheckMcAnnotationsOnNode() {
exutil.By("check machine config annotation to verify update is done")
clusterName := ht.StrValue(TestCtxKeyCluster)
kubeconf := ht.StrValue(TestCtxKeyKubeConfig)
npName := ht.StrValue(HypershiftCrNodePool)
ht.oc.SetGuestKubeconf(kubeconf)
np := NewHypershiftNodePool(ht.oc.AsAdmin().AsGuestKubeconf(), ht.clusterNS, npName)
workerNode := np.GetAllLinuxNodesOrFail()[0]
// get machine config name
secrets := NewNamespacedResourceList(ht.oc.AsAdmin(), "secrets", fmt.Sprintf("%s-%s", ht.clusterNS, clusterName))
secrets.SortByTimestamp()
secrets.ByFieldSelector("type=Opaque")
secrets.SetItemsFilter("-1:")
filterdSecrets, getSecretErr := secrets.GetAll()
o.Expect(getSecretErr).NotTo(o.HaveOccurred(), "Get latest secret failed")
userDataSecretName := filterdSecrets[0].GetName()
logger.Infof("get latest user-data secret name %s", userDataSecretName)
// mc name is the suffix of the secret name, e.g. user-data-inplace-upgrade-fe5d465e
tempSlice := strings.Split(userDataSecretName, "-")
mcName := tempSlice[len(tempSlice)-1]
logger.Infof("machine config name is %s", mcName)
logger.Debugf(workerNode.PrettyString())
desiredConfig := workerNode.GetAnnotationOrFail(NodeAnnotationDesiredConfig)
currentConfig := workerNode.GetAnnotationOrFail(NodeAnnotationCurrentConfig)
desiredDrain := workerNode.GetAnnotationOrFail(NodeAnnotationDesiredDrain)
lastAppliedDrain := workerNode.GetAnnotationOrFail(NodeAnnotationLastAppliedDrain)
reason := workerNode.GetAnnotationOrFail(NodeAnnotationReason)
state := workerNode.GetAnnotationOrFail(NodeAnnotationState)
drainReqID := fmt.Sprintf("uncordon-%s", mcName)
// do assertion for annotations, expected result is like below
// desiredConfig == currentConfig
o.Expect(currentConfig).Should(o.Equal(desiredConfig), "current config not equal to desired config")
// desiredConfig = $mcName
o.Expect(desiredConfig).Should(o.Equal(mcName))
// currentConfig = $mcName
o.Expect(currentConfig).Should(o.Equal(mcName))
// desiredDrain == lastAppliedDrain
o.Expect(desiredDrain).Should(o.Equal(lastAppliedDrain), "desired drain not equal to last applied drain")
// desiredDrain = uncordon-$mcName
o.Expect(desiredDrain).Should(o.Equal(drainReqID), "desired drain id is not expected")
// lastAppliedDrain = uncordon-$mcName
o.Expect(lastAppliedDrain).Should(o.Equal(drainReqID), "last applied drain id is not expected")
// reason is empty
o.Expect(reason).Should(o.BeEmpty(), "reason is not empty")
// state is 'Done'
o.Expect(state).Should(o.Equal("Done"))
}
// VerifyFileContent verify whether config file on node is expected
func (ht *HypershiftTest) VerifyFileContent() {
exutil.By("check whether the test file content is matched ")
filePath := ht.StrValue(TestCtxKeyFilePath)
kubeconf := ht.StrValue(TestCtxKeyKubeConfig)
npName := ht.StrValue(HypershiftCrNodePool)
ht.oc.SetGuestKubeconf(kubeconf)
np := NewHypershiftNodePool(ht.oc.AsAdmin().AsGuestKubeconf(), ht.clusterNS, npName)
workerNode := np.GetAllLinuxNodesOrFail()[0]
// when we call oc debug with the guest kubeconfig, the temp namespace oc.Namespace()
// cannot be found in the hosted cluster,
// so we copy the node object and change its namespace to default
clonedNode := workerNode
clonedNode.oc.SetNamespace("default")
rf := NewRemoteFile(clonedNode, filePath)
o.Expect(rf.Fetch()).NotTo(o.HaveOccurred(), "fetch remote file failed")
o.Expect(rf.GetTextContent()).Should(o.ContainSubstring("hello world"), "file content does not match machine config setting")
}
// skipTestIfLatestAcceptedBuildIsSameAsHostedClusterVersion, skip the test if latest accepted nightly build is same as hostedcluster version
func (ht *HypershiftTest) skipTestIfLatestAcceptedBuildIsSameAsHostedClusterVersion() {
// OCPQE-17034: if the latest accepted build is the same as the hosted cluster version (and therefore the nodepool version),
// release image update will not happen, skip this case.
// Get hosted cluster version
hostedclusterName := ht.StrValue(TestCtxKeyCluster)
hostedcluster := NewHypershiftHostedCluster(ht.oc.AsAdmin(), ht.clusterNS, hostedclusterName)
hostedclusterVersion := hostedcluster.GetVersion()
// Get latest accepted build
_, latestAcceptedBuild := getLatestImageURL(ht.oc, strings.Join(strings.Split(hostedclusterVersion, ".")[:2], "."))
if hostedclusterVersion == latestAcceptedBuild {
g.Skip(fmt.Sprintf("latest accepted build [%s] is same as hosted cluster version [%s], cannot update release image, skip this case", latestAcceptedBuild, hostedclusterVersion))
}
}
func (ht *HypershiftTest) getHypershiftImage() string {
// get minor release version as image tag
// imageTag, _, cvErr := exutil.GetClusterVersion(ht.oc)
// o.Expect(cvErr).NotTo(o.HaveOccurred(), "Get minor release version error")
// Because of https://issues.redhat.com/browse/OCPQE-26256 we will always use the "latest" image
imageTag := "latest"
repo := "quay.io/hypershift/hypershift-operator"
arch := architecture.ClusterArchitecture(ht.oc)
if arch == architecture.ARM64 || arch == architecture.MULTI {
repo = "quay.io/acm-d/rhtap-hypershift-operator"
}
image := fmt.Sprintf("%s:%s", repo, imageTag)
logger.Infof("Hypershift image is: %s", image)
return image
}
|
package mco
| ||||
function
|
openshift/openshift-tests-private
|
629286fd-46e9-4a89-8017-1dd06d18f877
|
GetCloudCredential
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_hypershift.go
|
func GetCloudCredential(oc *exutil.CLI) CloudCredential {
var (
cc CloudCredential
ce error
)
platform := exutil.CheckPlatform(oc)
switch platform {
case "aws":
cc, ce = NewAwsCredential(oc, "default")
o.Expect(ce).NotTo(o.HaveOccurred(), "extract aws cred from cluster failed")
default:
logger.Infof("no impl of CloudCredential for platform %s right now", platform)
}
return cc
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
152f3860-83c7-43f5-8f24-91015942344f
|
InstallOnAws
|
['"time"']
|
['HypershiftTest']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_hypershift.go
|
func (ht *HypershiftTest) InstallOnAws() {
exutil.By("install hypershift operator")
awscred := ht.cred.(*AwsCredential)
_, installErr := ht.cli.Install(
NewAwsInstallOptions().
WithBucket(ht.StrValue(TestCtxKeyBucket)).
WithCredential(awscred.file).
WithRegion(awscred.region).
WithEnableDefaultingWebhook().
WithHypershiftImage(ht.getHypershiftImage()))
o.Expect(installErr).NotTo(o.HaveOccurred(), "install hypershift operator via cli failed")
// check whether pod under ns hypershift is running
exutil.AssertAllPodsToBeReadyWithPollerParams(ht.oc, "hypershift", 20*time.Second, 5*time.Minute)
logger.Infof("hypershift is installed on AWS successfully")
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
aa89aa2f-f2e4-449b-b899-3571c64b9017
|
Uninstall
|
['HypershiftTest']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_hypershift.go
|
func (ht *HypershiftTest) Uninstall() {
ht.cli.Uninstall()
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
6b0daf07-6625-40c4-91e4-1c253e73b990
|
CreateBucket
|
['"fmt"']
|
['HypershiftTest']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_hypershift.go
|
func (ht *HypershiftTest) CreateBucket() {
exutil.By("configure aws-cred file with default profile")
const (
bucketPolicyTemplate = `{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": "*",
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::%s/*"
}
]
}`
)
// create a temp file to store aws credential in shared temp dir
credfile := generateTempFilePath(ht.dir, "aws-cred-*.conf")
// call CloudCredential#OutputToFile to write cred info to temp file
o.Expect(ht.cred.OutputToFile(credfile)).NotTo(o.HaveOccurred(), "write aws cred to file failed")
exutil.By("create s3 bucket for installer")
// get aws cred
awscred := ht.cred.(*AwsCredential)
// get infra name as part of bucket name
infraName := NewResource(ht.oc.AsAdmin(), "infrastructure", "cluster").GetOrFail("{.status.infrastructureName}")
// bucket name pattern: $infraName-$component-$region-$randstr e.g. rioliu-092301-mvw2f-hypershift-us-east-2-glnjmsex
bucket := fmt.Sprintf("%s-hypershift-%s-%s", infraName, awscred.region, exutil.GetRandomString())
ht.Put(TestCtxKeyBucket, bucket)
// init s3 client
s3 := exutil.NewS3ClientFromCredFile(awscred.file, "default", awscred.region)
// create the bucket if it does not exist
o.Expect(s3.CreateBucket(bucket)).NotTo(o.HaveOccurred(), "create aws s3 bucket %s failed", bucket)
policy := fmt.Sprintf(bucketPolicyTemplate, bucket)
o.Expect(s3.PutBucketPolicy(bucket, policy)).To(o.Succeed(), "an error happened while adding a policy to the bucket")
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
65fb3a64-f35a-401a-a9d9-9d5ecb365487
|
DeleteBucket
|
['HypershiftTest']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_hypershift.go
|
func (ht *HypershiftTest) DeleteBucket() {
exutil.By("delete s3 bucket to recycle cloud resource")
// get aws cred
awscred := ht.cred.(*AwsCredential)
// init s3 client
s3 := exutil.NewS3ClientFromCredFile(awscred.file, "default", awscred.region)
// delete bucket, ignore not found
bucketName := ht.StrValue(TestCtxKeyBucket)
o.Expect(s3.DeleteBucket(bucketName)).NotTo(o.HaveOccurred(), "delete aws s3 bucket %s failed", bucketName)
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
5265672c-121f-41d1-aaae-94c1ba440b2b
|
CreateClusterOnAws
|
['"fmt"', '"path/filepath"', '"time"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"']
|
['HypershiftTest']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_hypershift.go
|
func (ht *HypershiftTest) CreateClusterOnAws() {
exutil.By("extract pull-secret from namespace openshift-config")
// extract pull secret and save it to temp dir
secret := NewSecret(ht.oc.AsAdmin(), "openshift-config", "pull-secret")
o.Expect(
secret.ExtractToDir(ht.dir)).
NotTo(o.HaveOccurred(),
fmt.Sprintf("extract pull-secret from openshift-config to %s failed", ht.dir))
secretFile := filepath.Join(ht.dir, ".dockerconfigjson")
logger.Infof("pull-secret info is saved to %s", secretFile)
exutil.By("get base domain from resource dns/cluster")
baseDomain := getBaseDomain(ht.oc)
logger.Infof("based domain is: %s", baseDomain)
exutil.By("create hosted cluster on AWS")
name := fmt.Sprintf("mco-cluster-%s", exutil.GetRandomString())
ht.Put(TestCtxKeyCluster, name)
awscred := ht.cred.(*AwsCredential)
// We will use the same release image that the host cluster is using
releaseImage, err := GetClusterDesiredReleaseImage(ht.oc)
o.Expect(err).NotTo(o.HaveOccurred())
logger.Infof("Using release image: %s", releaseImage)
createClusterOpts := NewAwsCreateClusterOptions().
WithAwsCredential(awscred.file).
WithBaseDomain(baseDomain).
WithPullSecret(secretFile).
WithRegion(awscred.region).
WithReleaseImage(releaseImage).
WithName(name)
arch := architecture.ClusterArchitecture(ht.oc)
if arch == architecture.ARM64 {
createClusterOpts = createClusterOpts.WithArch("arm64")
}
_, createClusterErr := ht.cli.CreateCluster(createClusterOpts)
o.Expect(createClusterErr).NotTo(o.HaveOccurred(), "create hosted cluster on aws failed")
ht.clusterNS = exutil.GetHyperShiftHostedClusterNameSpace(ht.oc)
logger.Infof("the hosted cluster namespace is: %s", ht.clusterNS)
// wait until the hosted control plane is available
defer ht.oc.AsAdmin().Run("get").Args("-n", fmt.Sprintf("%s-%s", ht.clusterNS, name), "pods").Execute() // for debugging purpose
exutil.AssertAllPodsToBeReadyWithPollerParams(ht.oc, fmt.Sprintf("%s-%s", ht.clusterNS, name), 30*time.Second, 10*time.Minute)
logger.Infof("hosted cluster %s is created successfully on AWS", name)
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
68f44545-afbe-4244-82b2-81163447a6d2
|
DestroyClusterOnAws
|
['"fmt"']
|
['HypershiftTest']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_hypershift.go
|
func (ht *HypershiftTest) DestroyClusterOnAws() {
clusterName := ht.StrValue(TestCtxKeyCluster)
exutil.By(fmt.Sprintf("destroy hosted cluster %s", clusterName))
awscred := ht.cred.(*AwsCredential)
destroyClusterOpts := NewAwsDestroyClusterOptions().
WithName(clusterName).
WithAwsCredential(awscred.file).
WithDestroyCloudResource()
_, destroyClusterErr := ht.cli.DestroyCluster(destroyClusterOpts)
o.Expect(destroyClusterErr).NotTo(o.HaveOccurred(), fmt.Sprintf("destroy hosted cluster %s failed", clusterName))
logger.Infof(fmt.Sprintf("hosted cluster %s is destroyed successfully", clusterName))
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
f3e532bb-d052-480e-a12d-269ab7497287
|
CreateNodePoolOnAws
|
['"fmt"', '"os"', '"path/filepath"', '"strings"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"']
|
['HypershiftTest']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_hypershift.go
|
func (ht *HypershiftTest) CreateNodePoolOnAws(replica string) {
exutil.By("create rendered node pool")
clusterName := ht.StrValue(TestCtxKeyCluster)
name := fmt.Sprintf("%s-np-%s", clusterName, exutil.GetRandomString())
renderNodePoolOpts := NewAwsCreateNodePoolOptions().
WithName(name).
WithClusterName(clusterName).
WithNodeCount(replica).
WithNamespace(ht.clusterNS).
WithRender()
arch := architecture.ClusterArchitecture(ht.oc)
if arch == architecture.ARM64 {
renderNodePoolOpts = renderNodePoolOpts.WithArch("arm64")
}
renderedNp, renderNpErr := ht.cli.CreateNodePool(renderNodePoolOpts)
o.Expect(renderNpErr).NotTo(o.HaveOccurred(), fmt.Sprintf("create node pool %s failed", name))
o.Expect(renderedNp).NotTo(o.BeEmpty(), "rendered nodepool is empty")
// replace upgradeType to InPlace
renderedNp = strings.ReplaceAll(renderedNp, "Replace", "InPlace")
logger.Infof("change upgrade type from Replace to InPlace in rendered node pool")
// write rendered node pool to temp file
renderedFile := filepath.Join(ht.dir, fmt.Sprintf("%s-%s.yaml", name, exutil.GetRandomString()))
o.Expect(
os.WriteFile(renderedFile, []byte(renderedNp), 0o600)).
NotTo(o.HaveOccurred(), fmt.Sprintf("write rendered node pool to %s failed", renderedFile))
logger.Infof("rendered node pool is saved to file %s", renderedFile)
// apply updated node pool file
o.Expect(
ht.oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", renderedFile).Execute()).
NotTo(o.HaveOccurred(), "create rendered node pool failed")
logger.Infof("poll node pool status, expected is desired nodes == current nodes")
np := NewHypershiftNodePool(ht.oc.AsAdmin(), ht.clusterNS, name)
logger.Debugf(np.PrettyString())
// poll node pool state, expected is desired nodes == current nodes
np.WaitUntilReady()
ht.Put(TestCtxKeyNodePool, name)
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
c7b5776a-bbc3-44ef-a251-a8965691b151
|
DestroyNodePoolOnAws
|
['"fmt"']
|
['HypershiftTest']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_hypershift.go
|
func (ht *HypershiftTest) DestroyNodePoolOnAws() {
exutil.By("destroy nodepool related resources")
logger.Infof("delete node pool related machines")
npName := ht.StrValue(TestCtxKeyNodePool)
clusterName := ht.StrValue(TestCtxKeyCluster)
awsMachines, getAwsMachineErr := NewNamespacedResourceList(ht.oc.AsAdmin(), HypershiftAwsMachine, fmt.Sprintf("%s-%s", ht.clusterNS, clusterName)).GetAll()
o.Expect(getAwsMachineErr).NotTo(o.HaveOccurred(), "get awsmachines failed for hosted cluster %s", clusterName)
o.Expect(awsMachines).ShouldNot(o.BeEmpty())
for _, machine := range awsMachines {
clonedFromName := machine.GetAnnotationOrFail(`cluster.x-k8s.io/cloned-from-name`)
if clonedFromName == npName {
logger.Infof("deleting awsmachine %s", machine.GetName())
deleteMachineErr := machine.Delete()
if deleteMachineErr != nil {
// here we just log the error and do not terminate the clean-up process.
// if any deletion fails, the resource will be recycled by hypershift
logger.Errorf("delete awsmachine %s failed\n %v", machine.GetName(), deleteMachineErr)
} else {
logger.Infof("awsmachine %s is deleted successfully", machine.GetName())
}
}
}
logger.Infof("all the awsmachines of nodepool %s are deleted", npName)
NewNamespacedResource(ht.oc.AsAdmin(), HypershiftCrNodePool, ht.clusterNS, npName).DeleteOrFail()
logger.Infof("nodepool %s is deleted successfully", npName)
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
9a86e37a-bdb1-4ba8-b972-187229d07f5e
|
CreateMcConfigMap
|
['"fmt"']
|
['HypershiftTest']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_hypershift.go
|
func (ht *HypershiftTest) CreateMcConfigMap() {
exutil.By("create machine config in config map")
template := generateTemplateAbsolutePath(TmplHypershiftMcConfigMap)
cmName := fmt.Sprintf("mc-cm-%s", exutil.GetRandomString())
mcName := fmt.Sprintf("99-mc-test-%s", exutil.GetRandomString())
mcpName := MachineConfigPoolWorker
filePath := fmt.Sprintf("/home/core/test-%s", exutil.GetRandomString())
exutil.ApplyNsResourceFromTemplate(
ht.oc.AsAdmin(),
ht.clusterNS,
"--ignore-unknown-parameters=true",
"-f", template,
"-p",
"CMNAME="+cmName,
"MCNAME="+mcName,
"POOL="+mcpName,
"FILEPATH="+filePath,
)
// get config map to check it exists or not
cm := NewNamespacedResource(ht.oc.AsAdmin(), "cm", ht.clusterNS, cmName)
o.Expect(cm.Exists()).Should(o.BeTrue(), "mc config map does not exist")
logger.Debugf(cm.PrettyString())
logger.Infof("config map %s is created successfully", cmName)
ht.Put(TestCtxKeyConfigMap, cmName)
ht.Put(TestCtxKeyFilePath, filePath)
}
|
mco
|