element_type | project_name | uuid | name | imports | structs | interfaces | file_location | code | global_vars | package | tags |
---|---|---|---|---|---|---|---|---|---|---|---|
function
|
openshift/openshift-tests-private
|
391f8272-68df-4e95-978f-390f9a53aedb
|
skipIfDiskSpaceLessThanBytes
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_pinnedimages.go
|
func skipIfDiskSpaceLessThanBytes(node Node, path string, minNumBytes int64) {
diskUsage, err := node.GetFileSystemSpaceUsage(path)
o.Expect(err).NotTo(o.HaveOccurred(),
"Cannot get the disk usage in node %s", node.GetName())
if minNumBytes > diskUsage.Avail {
g.Skip(fmt.Sprintf("Available diskspace in %s is %d bytes, which is less than the required %d bytes",
node.GetName(), diskUsage.Avail, minNumBytes))
}
logger.Infof("Required disk space %d bytes, available disk space %d", minNumBytes, diskUsage.Avail)
}
|
mco
| ||||
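A minimal usage sketch (not part of the row above), assuming the node variable, the math import and the minGigasAvailableInNodes value that appear in test case 73630 further down: the caller converts a GiB requirement into a byte count before invoking the helper.
minGigasAvailableInNodes := 40
skipIfDiskSpaceLessThanBytes(node, "/var/lib/containers/storage/", int64(float64(minGigasAvailableInNodes)*math.Pow(1024, 3)))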
function
|
openshift/openshift-tests-private
|
a35f1e71-8807-46f4-84b7-114c6d544501
|
getCurrentReleaseInfoImageSpecOrDefault
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_pinnedimages.go
|
func getCurrentReleaseInfoImageSpecOrDefault(oc *exutil.CLI, imageName, defaultImageName string) string {
image, err := getCurrentReleaseInfoImageSpec(oc, imageName)
if err != nil {
return defaultImageName
}
return image
}
|
mco
| |||||
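A minimal usage sketch of the fallback pattern, assuming the oc client and the BusyBoxImage constant used by the test cases below (see test case 73635): pin the release "tools" image when the release info is reachable, otherwise fall back to busybox.
pinnedImage := getCurrentReleaseInfoImageSpecOrDefault(oc.AsAdmin(), "tools", BusyBoxImage)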
function
|
openshift/openshift-tests-private
|
38a3959b-07c2-4b0a-bf0a-fcd23bbce0f9
|
getCurrentReleaseInfoImageSpec
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_pinnedimages.go
|
func getCurrentReleaseInfoImageSpec(oc *exutil.CLI, imageName string) (string, error) {
mMcp := NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)
allNodes, err := mMcp.GetNodes()
if err != nil {
return "", err
}
master := allNodes[0]
remoteAdminKubeConfig := fmt.Sprintf("/root/remoteKubeConfig-%s", exutil.GetRandomString())
adminKubeConfig := exutil.KubeConfigPath()
defer master.RemoveFile(remoteAdminKubeConfig)
err = master.CopyFromLocal(adminKubeConfig, remoteAdminKubeConfig)
if err != nil {
return "", err
}
stdout, _, err := master.DebugNodeWithChrootStd("oc", "adm", "release", "info", "--image-for", imageName, "--registry-config", "/var/lib/kubelet/config.json", "--kubeconfig", remoteAdminKubeConfig)
if err != nil {
return "", err
}
return stdout, nil
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
093385c2-a6da-4b90-b0a2-8641a29c9e25
|
DigestMirrorTest
|
['"time"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_pinnedimages.go
|
func DigestMirrorTest(oc *exutil.CLI, mcp *MachineConfigPool, idmsName, idmsMirrors, pinnedImage, pinnedImageSetName string) {
var (
allNodes = mcp.GetNodesOrFail()
waitForPinned = 10 * time.Minute
mcpsList = NewMachineConfigPoolList(oc.AsAdmin())
)
exutil.By("Remove the image from all nodes in the pool")
for _, node := range allNodes {
// We ignore errors, since the image can be present or not in the nodes
_ = NewRemoteImage(node, pinnedImage).Rmi()
}
logger.Infof("OK!\n")
exutil.By("Create new machine config to deploy a ImageDigestMirrorSet configuring a mirror registry")
idms := NewImageDigestMirrorSet(oc.AsAdmin(), idmsName, *NewMCOTemplate(oc, "add-image-digest-mirror-set.yaml"))
defer mcpsList.waitForComplete() // An ImageDigestMirrorSet resource impacts all the pools in the cluster
defer idms.Delete()
idms.Create("-p", "NAME="+idmsName, "IMAGEDIGESTMIRRORS="+idmsMirrors)
mcpsList.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Pin the mirrored image")
pis, err := CreateGenericPinnedImageSet(oc.AsAdmin(), pinnedImageSetName, mcp.GetName(), []string{pinnedImage})
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating pinnedimageset %s", pis)
defer pis.DeleteAndWait(waitForPinned)
logger.Infof("OK!\n")
exutil.By("Wait for all images to be pinned")
o.Expect(mcp.waitForPinComplete(waitForPinned)).To(o.Succeed(), "Pinned image operation is not completed in %s", mcp)
logger.Infof("OK!\n")
exutil.By("Check that the image is pinned")
for _, node := range allNodes {
ri := NewRemoteImage(node, pinnedImage)
logger.Infof("Checking %s", ri)
o.Expect(ri.IsPinned()).To(o.BeTrue(),
"%s is not pinned, but it should", ri)
}
logger.Infof("OK!\n")
}
|
mco
| ||||
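A minimal usage sketch, assuming the oc client, mcp pool, strings import and BusyBoxImage constant used by test case 73653 below: the pinned image spec is rewritten to point at the mirror configured through the ImageDigestMirrorSet before DigestMirrorTest is called.
idmsMirrors := `[{"mirrors":["quay.io/openshifttest/busybox"], "source": "example-repo.io/digest-example/mybusy", "mirrorSourcePolicy":"NeverContactSource"}]`
pinnedImage := strings.Replace(BusyBoxImage, "quay.io/openshifttest/busybox", "example-repo.io/digest-example/mybusy", 1)
DigestMirrorTest(oc, mcp, "tc-73653-mirror-single-repository", idmsMirrors, pinnedImage, "tc-73653-mirror-single-repository")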
test case
|
openshift/openshift-tests-private
|
176f818a-2f58-45fc-a530-c1fe7b9cf19a
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-73659-[P1][OnCLayer] Pinned images when disk-pressure [Disruptive]
|
['"fmt"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_pinnedimages.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-73659-[P1][OnCLayer] Pinned images when disk-pressure [Disruptive]", func() {
var (
waitForPinned = time.Minute * 5
pinnedImageSetName = "tc-73659-pin-images-disk-pressure"
pinnedImageName = BusyBoxImage
allNodes = mcp.GetNodesOrFail()
node = allNodes[0]
cleanFileTimedService = generateTemplateAbsolutePath("tc-73659-clean-file-timed.service")
cleanFileTimedServiceDestinationPath = "/etc/systemd/system/tc-73659-clean-file-timed.service"
)
exutil.By("Get disk usage in node")
diskUsage, err := node.GetFileSystemSpaceUsage("/var/lib/containers/storage/")
o.Expect(err).NotTo(o.HaveOccurred(),
"Cannot get the disk usage in node %s", node.GetName())
logger.Infof("OK!\n")
exutil.By("Create a timed service that will restore the original disk usage after 5 minutes")
logger.Infof("Copy the service in the node")
defer node.DebugNodeWithChroot("rm", cleanFileTimedServiceDestinationPath)
o.Expect(node.CopyFromLocal(cleanFileTimedService, cleanFileTimedServiceDestinationPath)).
NotTo(o.HaveOccurred(),
"Error copying %s to %s in node %s", cleanFileTimedService, cleanFileTimedServiceDestinationPath, node.GetName())
// We create a transient timer that will execute the service; the service will restore the disk usage to its original value
logger.Infof("Create a transient timer to execute the service after 5 minutes")
// If an error happens, the transient timer will not be deleted unless we execute this command
defer node.DebugNodeWithChroot("systemctl", "reset-failed", "tc-73659-clean-file-timed.service")
defer node.DebugNodeWithChroot("systemctl", "stop", "tc-73659-clean-file-timed.service")
_, err = node.DebugNodeWithChroot("systemd-run", `--on-active=5minutes`, `--unit=tc-73659-clean-file-timed.service`)
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating the transient timer")
logger.Infof("OK!\n")
exutil.By("Create a huge file so that the node reports disk pressure. Use about 90 per cent of the free space in the disk")
fileSize := ((diskUsage.Avail + diskUsage.Used) * 9 / 10) - diskUsage.Used // calculate the file size to use a 90% of the disk space
o.Expect(fileSize).To(o.And(
o.BeNumerically("<", diskUsage.Avail),
o.BeNumerically(">", 0)),
"Error not enough space on device to execute this test. Available: %d, Used %d", diskUsage.Avail, diskUsage.Used)
_, err = node.DebugNodeWithChroot("fallocate", "-l", fmt.Sprintf("%d", fileSize), "/var/lib/containers/storage/tc-73659-huge-test-file.file")
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating a file to trigger disk pressure")
logger.Infof("OK!\n")
exutil.By("Wait for disk pressure to be reported")
// It makes no sense to wait longer than the 5 minute timeout that we use to fix the disk usage.
// If we need to increase this timeout, we need to increase the transient timer too
o.Eventually(&node, "5m", "20s").Should(HaveConditionField("DiskPressure", "status", TrueString),
"Node is not reporting DiskPressure, but it should.\n%s", node.PrettyString())
logger.Infof("OK!\n")
exutil.By("Pin images")
pis, err := CreateGenericPinnedImageSet(oc.AsAdmin(), pinnedImageSetName, mcp.GetName(), []string{pinnedImageName})
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating pinnedimageset %s", pis)
defer pis.DeleteAndWait(waitForPinned)
logger.Infof("OK!\n")
exutil.By("Check the degraded status")
logger.Infof("Check that the node with disk pressure is reporting pinnedimagesetdegraded status")
mcn := node.GetMachineConfigNode()
o.Eventually(mcn, "2m", "20s").Should(HaveConditionField("PinnedImageSetsDegraded", "status", TrueString),
"MachineConfigNode was not degraded.\n%s\n%s", mcn.PrettyString(), node.PrettyString())
o.Eventually(mcn, "2m", "20s").Should(HaveConditionField("PinnedImageSetsDegraded", "reason", "PrefetchFailed"),
"MachineConfigNode was not degraded with the expected reason.\n%s\n%s", mcn.PrettyString(), node.PrettyString())
o.Eventually(mcn, "2m", "20s").Should(HaveConditionField("PinnedImageSetsDegraded", "message", `node `+node.GetName()+` is reporting OutOfDisk=True`),
"MachineConfigNode was not degraded with the expected message.\n%s\n%s", mcn.PrettyString(), node.PrettyString())
logger.Infof("Check that the rest of the nodes could pin the image and are not degraded")
for _, n := range allNodes {
if n.GetName() != node.GetName() {
logger.Infof("Checking node %s", n.GetName())
o.Eventually(n.GetMachineConfigNode, "2m", "20s").Should(HaveConditionField("PinnedImageSetsDegraded", "status", FalseString),
"MachineConfigNode was degraded.\n%s\n%s", node.GetMachineConfigNode().PrettyString(), node.PrettyString())
rmi := NewRemoteImage(n, pinnedImageName)
o.Eventually(rmi.IsPinned, "5m", "20s").Should(o.BeTrue(), "%s should be pinned but it is not", rmi)
}
}
logger.Infof("OK!\n")
exutil.By("Wait for disk pressure to be fixed") // It should be fixed by the timed service that was created before
o.Eventually(&node, "20m", "20s").Should(HaveConditionField("DiskPressure", "status", FalseString),
"Node is reporting DiskPressure, but it should not.\n%s", node.PrettyString())
logger.Infof("OK!\n")
exutil.By("Check that the degraded status was fixed")
o.Eventually(mcn, "2m", "20s").Should(HaveConditionField("PinnedImageSetsDegraded", "status", FalseString),
"MachineConfigNode was not degraded.\n%s\n%s", mcn.PrettyString(), node.PrettyString())
o.Eventually(NewRemoteImage(node, pinnedImageName).IsPinned, "2m", "20s").Should(o.BeTrue(),
"The degraded status was fixed, but the image was not pinned")
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
581bd039-82d5-4591-b3c0-6329cb844aac
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-High-73623-[P2][OnCLayer] Pin images [Disruptive]
|
['"fmt"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_pinnedimages.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-High-73623-[P2][OnCLayer] Pin images [Disruptive]", func() {
var (
waitForPinned = time.Minute * 15
pinnedImageSetName = "tc-73623-pin-images"
node = mcp.GetNodesOrFail()[0]
firstPinnedImage = NewRemoteImage(node, BusyBoxImage)
secondPinnedImage = NewRemoteImage(node, AlpineImage)
)
exutil.By("Remove images")
_ = firstPinnedImage.Rmi()
_ = secondPinnedImage.Rmi()
logger.Infof("OK!\n")
exutil.By("Pin images")
pis, err := CreateGenericPinnedImageSet(oc.AsAdmin(), pinnedImageSetName, mcp.GetName(), []string{firstPinnedImage.ImageName, secondPinnedImage.ImageName})
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating pinnedimageset %s", pis)
defer pis.DeleteAndWait(waitForPinned)
logger.Infof("OK!\n")
exutil.By("Wait for all images to be pinned")
o.Expect(mcp.waitForPinComplete(waitForPinned)).To(o.Succeed(), "Pinned image operation is not completed in %s", mcp)
logger.Infof("OK!\n")
exutil.By("Check that the images are pinned")
o.Expect(firstPinnedImage.IsPinned()).To(o.BeTrue(), "%s is not pinned, but it should", firstPinnedImage)
o.Expect(secondPinnedImage.IsPinned()).To(o.BeTrue(), "%s is not pinned, but it should", secondPinnedImage)
logger.Infof("OK!\n")
exutil.By("Patch the pinnedimageset and remove one image")
o.Expect(
pis.Patch("json", fmt.Sprintf(`[{"op": "replace", "path": "/spec/pinnedImages", "value": [{"name": "%s"}]}]`, firstPinnedImage.ImageName)),
).To(o.Succeed(),
"Error patching %s to remove one image")
logger.Infof("OK!\n")
exutil.By("Wait for the pinnedimageset changes to be applied")
o.Expect(mcp.waitForPinComplete(waitForPinned)).To(o.Succeed(), "Pinned image operation is not completed in %s", mcp)
logger.Infof("OK!\n")
exutil.By("Check that only the image reamaining in the pinnedimageset is pinned")
o.Expect(firstPinnedImage.IsPinned()).To(o.BeTrue(), "%s is not pinned, but it should", firstPinnedImage)
o.Expect(secondPinnedImage.IsPinned()).To(o.BeFalse(), "%s is pinned, but it should NOT", secondPinnedImage)
logger.Infof("OK!\n")
exutil.By("Remove the pinnedimageset")
o.Expect(pis.Delete()).To(o.Succeed(), "Error removing %s", pis)
logger.Infof("OK!\n")
exutil.By("Wait for the pinnedimageset removal to be applied")
o.Expect(mcp.waitForPinComplete(waitForPinned)).To(o.Succeed(), "Pinned image operation is not completed in %s", mcp)
logger.Infof("OK!\n")
exutil.By("Check that only the image reamaining in the pinnedimageset is pinned")
o.Expect(firstPinnedImage.IsPinned()).To(o.BeFalse(), "%s is pinned, but it should NOT", firstPinnedImage)
o.Expect(secondPinnedImage.IsPinned()).To(o.BeFalse(), "%s is pinned, but it should NOT", secondPinnedImage)
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
a57c6269-a9b9-4efd-8c61-50769cf3cfcb
|
Author:sregidor-ConnectedOnly-NonHyperShiftHOST-NonPreRelease-Longduration-High-73653-[OnCLayer] Pinned images with a ImageDigestMirrorSet mirroring a single repository [Disruptive]
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_pinnedimages.go
|
g.It("Author:sregidor-ConnectedOnly-NonHyperShiftHOST-NonPreRelease-Longduration-High-73653-[OnCLayer] Pinned images with a ImageDigestMirrorSet mirroring a single repository [Disruptive]", func() {
var (
idmsName = "tc-73653-mirror-single-repository"
idmsMirrors = `[{"mirrors":["quay.io/openshifttest/busybox"], "source": "example-repo.io/digest-example/mybusy", "mirrorSourcePolicy":"NeverContactSource"}]`
// actually quay.io/openshifttest/busybox@sha256:c5439d7db88ab5423999530349d327b04279ad3161d7596d2126dfb5b02bfd1f but using our configured mirror instead
pinnedImage = strings.Replace(BusyBoxImage, "quay.io/openshifttest/busybox", "example-repo.io/digest-example/mybusy", 1)
pinnedImageSetName = "tc-73653-mirror-single-repository"
)
DigestMirrorTest(oc, mcp, idmsName, idmsMirrors, pinnedImage, pinnedImageSetName)
})
| |||||
test case
|
openshift/openshift-tests-private
|
9ed2f2f3-a3ae-413c-8875-7abc65bb715d
|
Author:sregidor-ConnectedOnly-NonHyperShiftHOST-NonPreRelease-Longduration-High-73657-[P1][OnCLayer] Pinned images with a ImageDigestMirrorSet mirroring a domain [Disruptive]
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_pinnedimages.go
|
g.It("Author:sregidor-ConnectedOnly-NonHyperShiftHOST-NonPreRelease-Longduration-High-73657-[P1][OnCLayer] Pinned images with a ImageDigestMirrorSet mirroring a domain [Disruptive]", func() {
var (
idmsName = "tc-73657-mirror-domain"
idmsMirrors = `[{"mirrors":["quay.io:443"], "source": "example-domain.io:443", "mirrorSourcePolicy":"NeverContactSource"}]`
// actually quay.io/openshifttest/busybox@sha256:c5439d7db88ab5423999530349d327b04279ad3161d7596d2126dfb5b02bfd1f but using our configured mirror instead
pinnedImage = strings.Replace(BusyBoxImage, "quay.io", "example-domain.io:443", 1)
pinnedImageSetName = "tc-73657-mirror-domain"
)
DigestMirrorTest(oc, mcp, idmsName, idmsMirrors, pinnedImage, pinnedImageSetName)
})
| |||||
test case
|
openshift/openshift-tests-private
|
cac39b64-1b8f-4572-865f-f2dd9d4ccb6d
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Medium-73361-[P2][OnCLayer] Pinnedimageset invalid pinned images [Disruptive]
|
['"time"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_pinnedimages.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Medium-73361-[P2][OnCLayer] Pinnedimageset invalid pinned images [Disruptive]", func() {
var (
invalidPinnedImage = "quay.io/openshiftfake/fakeimage@sha256:0415f56ccc05526f2af5a7ae8654baec97d4a614f24736e8eef41a4591f08019"
pinnedImageSetName = "tc-73361-invalid-pinned-image"
waitForPinned = 10 * time.Minute
)
exutil.By("Pin invalid image")
pis, err := CreateGenericPinnedImageSet(oc.AsAdmin(), pinnedImageSetName, mcp.GetName(), []string{invalidPinnedImage})
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating pinnedimageset %s", pis)
defer pis.DeleteAndWait(waitForPinned)
logger.Infof("OK!\n")
exutil.By("Check that MCNs are PinnedImageSetDegraded")
for _, node := range mcp.GetNodesOrFail() {
mcn := node.GetMachineConfigNode()
o.Eventually(mcn, "2m", "20s").Should(HaveConditionField("PinnedImageSetsDegraded", "status", TrueString))
o.Eventually(mcn, "2m", "20s").Should(HaveConditionField("PinnedImageSetsDegraded", "reason", "PrefetchFailed"))
}
logger.Infof("OK!\n")
exutil.By("Remove the pinnedimageset")
o.Expect(pis.Delete()).To(o.Succeed(), "Error removing %s", pis)
logger.Infof("OK!\n")
exutil.By("Check that MCNs are not PinnedImageSetDegraded anymore")
for _, node := range mcp.GetNodesOrFail() {
mcn := node.GetMachineConfigNode()
o.Eventually(mcn, "2m", "20s").Should(HaveConditionField("PinnedImageSetsDegraded", "status", FalseString))
}
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
bbef13d8-b559-48a0-8b9e-b42fa07751c6
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-73631-[OnCLayer] Pinned images garbage collection [Disruptive]
|
['"time"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_pinnedimages.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-73631-[OnCLayer] Pinned images garbage collection [Disruptive]", func() {
var (
waitForPinned = time.Minute * 5
pinnedImageSetName = "tc-73631-pinned-images-garbage-collector"
gcKubeletConfig = `{"imageMinimumGCAge": "0s", "imageGCHighThresholdPercent": 2, "imageGCLowThresholdPercent": 1}`
kcTemplate = generateTemplateAbsolutePath("generic-kubelet-config.yaml")
kcName = "tc-73631-pinned-garbage-collector"
node = mcp.GetNodesOrFail()[0]
startTime = node.GetDateOrFail()
pinnedImage = NewRemoteImage(node, BusyBoxImage)
manuallyPulledImage = NewRemoteImage(node, AlpineImage)
)
exutil.By("Remove the test images")
_ = pinnedImage.Rmi()
_ = manuallyPulledImage.Rmi()
logger.Infof("OK!\n")
exutil.By("Configure kubelet to start garbage collection")
logger.Infof("Create worker KubeletConfig")
kc := NewKubeletConfig(oc.AsAdmin(), kcName, kcTemplate)
defer mcp.waitForComplete()
defer kc.Delete()
kc.create("KUBELETCONFIG="+gcKubeletConfig, "POOL="+mcp.GetName())
exutil.By("Wait for configurations to be applied in worker pool")
mcp.waitForComplete()
logger.Infof("OK!\n")
logger.Infof("Pin image")
pis, err := CreateGenericPinnedImageSet(oc.AsAdmin(), pinnedImageSetName, mcp.GetName(), []string{pinnedImage.ImageName})
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating pinnedimageset %s", pis)
defer pis.DeleteAndWait(waitForPinned)
logger.Infof("OK!\n")
exutil.By("Wait for all images to be pinned")
o.Expect(mcp.waitForPinComplete(waitForPinned)).To(o.Succeed(), "Pinned image operation is not completed in %s", mcp)
logger.Infof("OK!\n")
exutil.By("Manually pull image")
o.Expect(manuallyPulledImage.Pull()).To(o.Succeed(),
"Error pulling %s", manuallyPulledImage)
logger.Infof("Check that the manually pulled image is not pinned")
o.Expect(manuallyPulledImage.IsPinned()).To(o.BeFalse(),
"Error, %s is pinned, but it should not", manuallyPulledImage)
logger.Infof("OK!\n")
exutil.By("Check that the manually pulled image is garbage collected")
o.Eventually(manuallyPulledImage, "25m", "20s").ShouldNot(Exist(),
"Error, %s has not been garbage collected", manuallyPulledImage)
logger.Infof("OK!\n")
exutil.By("Check that the pinned image is still pinned after garbage collection")
o.Eventually(pinnedImage.IsPinned, "2m", "10s").Should(o.BeTrue(),
"Error, after the garbage collection happened %s is not pinned anymore", pinnedImage)
logger.Infof("OK!\n")
exutil.By("Reboot node")
o.Expect(node.Reboot()).To(o.Succeed(),
"Error rebooting node %s", node.GetName())
o.Eventually(node.GetUptime, "15m", "30s").Should(o.BeTemporally(">", startTime),
"%s was not properly rebooted", node)
logger.Infof("OK!\n")
exutil.By("Check that the pinned image is still pinned after reboot")
o.Expect(pinnedImage.IsPinned()).To(o.BeTrue(),
"Error, after the garbage collection happened %s is not pinned anymore", pinnedImage)
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
518235f6-9c7e-4cc0-b57e-24932c3721bb
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-73635-[P1][OnCLayer] Pod can use pinned images while no access to the registry [Disruptive]
|
['"fmt"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_pinnedimages.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-73635-[P1][OnCLayer] Pod can use pinned images while no access to the registry [Disruptive]", func() {
var (
waitForPinned = time.Minute * 5
pinnedImageSetName = "tc-73635-pinned-images-no-registry"
// We pin the current release's tools image
// if we cannot get the "tools" image it means we are in a disconnected cluster
// and in disconnected clusters openshifttest images are mirrored and need the credentials for the mirror too
// so if we cannot get the "tools" image we can use the "busybox" one.
pinnedImage = getCurrentReleaseInfoImageSpecOrDefault(oc.AsAdmin(), "tools", BusyBoxImage)
allNodes = mcp.GetNodesOrFail()
pullSecret = GetPullSecret(oc.AsAdmin())
deploymentName = "tc-73635-test"
deploymentNamespace = oc.Namespace()
deployment = NewNamespacedResource(oc, "deployment", deploymentNamespace, deploymentName)
scaledReplicas = 5
nodeList = NewNamespacedResourceList(oc, "pod", deploymentNamespace)
)
defer nodeList.PrintDebugCommand() // for debugging purpose in case of failed deployment
exutil.By("Remove the image from all nodes in the pool")
for _, node := range allNodes {
// We ignore errors, since the image can be present or not in the nodes
_ = NewRemoteImage(node, pinnedImage).Rmi()
}
logger.Infof("OK!\n")
exutil.By("Create pinnedimageset")
pis, err := CreateGenericPinnedImageSet(oc.AsAdmin(), pinnedImageSetName, mcp.GetName(), []string{pinnedImage})
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating pinnedimageset %s", pis)
defer pis.DeleteAndWait(waitForPinned)
logger.Infof("OK!\n")
exutil.By("Wait for all images to be pinned")
o.Expect(mcp.waitForPinComplete(waitForPinned)).To(o.Succeed(), "Pinned image operation is not completed in %s", mcp)
logger.Infof("OK!\n")
exutil.By("Check that the image was pinned in all nodes in the pool")
for _, node := range allNodes {
ri := NewRemoteImage(node, pinnedImage)
logger.Infof("Checking %s", ri)
o.Expect(ri.IsPinned()).To(o.BeTrue(),
"%s is not pinned, but it should", ri)
}
logger.Infof("OK!\n")
exutil.By("Capture the current pull-secret value")
// We don't use the pullSecret resource directly, instead we use auxiliary functions that will
// extract and restore the secret's values using a file. Like that we can recover the value of the pull-secret
// if our execution goes wrong, without printing it in the logs (for security reasons).
secretFile, err := getPullSecret(oc)
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the pull-secret")
logger.Debugf("Pull-secret content stored in file %s", secretFile)
defer func() {
logger.Infof("Restoring initial pull-secret value")
output, err := setDataForPullSecret(oc, secretFile)
if err != nil {
logger.Errorf("Error restoring the pull-secret's value. Error: %s\nOutput: %s", err, output)
}
wMcp.waitForComplete()
mMcp.waitForComplete()
}()
logger.Infof("OK!\n")
exutil.By("Set an empty pull-secret")
o.Expect(pullSecret.SetDataValue(".dockerconfigjson", "{}")).To(o.Succeed(),
"Error setting an empty pull-secret value")
mcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Check that the image is pinned")
for _, node := range allNodes {
logger.Infof("Checking node %s", node.GetName())
ri := NewRemoteImage(node, pinnedImage)
o.Expect(ri.IsPinned()).To(o.BeTrue(),
"%s is not pinned, but it should", ri)
}
logger.Infof("OK!\n")
exutil.By("Create test deployment")
defer deployment.Delete()
o.Expect(
NewMCOTemplate(oc.AsAdmin(), "create-deployment.yaml").Create("-p", "NAME="+deploymentName, "IMAGE="+pinnedImage, "NAMESPACE="+deploymentNamespace),
).To(o.Succeed(),
"Error creating the deployment")
o.Eventually(deployment, "6m", "15s").Should(BeAvailable(),
"Resource is NOT available:\n/%s", deployment.PrettyString())
o.Eventually(deployment.Get, "6m", "15s").WithArguments(`{.status.readyReplicas}`).Should(o.Equal(deployment.GetOrFail(`{.spec.replicas}`)),
"Resource is NOT stable, still creating replicas:\n/%s", deployment.PrettyString())
logger.Infof("OK!\n")
exutil.By("Scale app")
o.Expect(
deployment.Patch("merge", fmt.Sprintf(`{"spec":{"replicas":%d}}`, scaledReplicas)),
).To(o.Succeed(),
"Error scaling %s", deployment)
o.Eventually(deployment, "6m", "15s").Should(BeAvailable(),
"Resource is NOT available:\n/%s", deployment.PrettyString())
o.Eventually(deployment.Get, "6m", "15s").WithArguments(`{.status.readyReplicas}`).Should(o.Equal(deployment.GetOrFail(`{.spec.replicas}`)),
"Resource is NOT stable, still creating replicas:\n/%s", deployment.PrettyString())
logger.Infof("OK!\n")
exutil.By("Reboot nodes")
for _, node := range allNodes {
o.Expect(node.Reboot()).To(o.Succeed(), "Error rebooting node %s", node)
}
for _, node := range allNodes {
_, err := node.DebugNodeWithChroot("hostname")
o.Expect(err).NotTo(o.HaveOccurred(), "Node %s was not recovered after rebot", node)
}
logger.Infof("OK!\n")
exutil.By("Check that the applicaion is OK after the reboot")
o.Eventually(deployment, "6m", "15s").Should(BeAvailable(),
"Resource is NOT available:\n/%s", deployment.PrettyString())
o.Eventually(deployment.Get, "6m", "15s").WithArguments(`{.status.readyReplicas}`).Should(o.Equal(deployment.GetOrFail(`{.spec.replicas}`)),
"Resource is NOT stable, still creating replicas:\n/%s", deployment.PrettyString())
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
6b4c63e3-a63a-47fb-af7d-ad138413d4f8
|
Author:sregidor-NonHyperShiftHOST-ConnectedOnly-NonPreRelease-Longduration-Medium-73630-[P2][OnCLayer] Pin release images [Disruptive]
|
['"math"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_pinnedimages.go
|
g.It("Author:sregidor-NonHyperShiftHOST-ConnectedOnly-NonPreRelease-Longduration-Medium-73630-[P2][OnCLayer] Pin release images [Disruptive]", func() {
var (
waitForPinned = time.Minute * 30
pinnedImageSetName = "tc-73630-pinned-imageset-release"
pinnedImages = RemoveDuplicates(getReleaseInfoPullspecOrFail(oc.AsAdmin()))
node = mcp.GetNodesOrFail()[0]
minGigasAvailableInNodes = 40
)
skipIfDiskSpaceLessThanBytes(node, "/var/lib/containers/storage/", int64(float64(minGigasAvailableInNodes)*(math.Pow(1024, 3))))
exutil.By("Create pinnedimageset to pin all pullSpec images")
pis, err := CreateGenericPinnedImageSet(oc.AsAdmin(), pinnedImageSetName, mcp.GetName(), pinnedImages)
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating pinnedimageset %s", pis)
defer pis.DeleteAndWait(waitForPinned)
logger.Infof("OK!\n")
exutil.By("Wait for all images to be pinned")
o.Expect(mcp.waitForPinComplete(waitForPinned)).To(o.Succeed(), "Pinned image operation is not completed in %s", mcp)
logger.Infof("OK!\n")
exutil.By("Check that all images were pinned")
for _, image := range pinnedImages {
ri := NewRemoteImage(node, image)
o.Expect(ri.IsPinned()).To(o.BeTrue(),
"%s is not pinned, but it should", ri)
}
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
4a514175-60e3-4eac-9941-1ef51f50c6da
|
Author:sserafin-NonHyperShiftHOST-NonPreRelease-Longduration-High-73648-[OnCLayer] A rebooted node reconciles with the pinned images status [Disruptive]
|
['"time"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_pinnedimages.go
|
g.It("Author:sserafin-NonHyperShiftHOST-NonPreRelease-Longduration-High-73648-[OnCLayer] A rebooted node reconciles with the pinned images status [Disruptive]", func() {
var (
waitForPinned = time.Minute * 5
pinnedImageSetName = "tc-73648-pinned-image"
pinnedImage = BusyBoxImage
allMasters = mMcp.GetNodesOrFail()
pullSecret = GetPullSecret(oc.AsAdmin())
)
exutil.By("Create pinnedimageset")
pis, err := CreateGenericPinnedImageSet(oc.AsAdmin(), pinnedImageSetName, mMcp.GetName(), []string{pinnedImage})
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating pinnedimageset %s", pis)
defer pis.DeleteAndWait(waitForPinned)
logger.Infof("OK!\n")
exutil.By("Wait for all images to be pinned")
o.Expect(mMcp.waitForPinComplete(waitForPinned)).To(o.Succeed(), "Pinned image operation is not completed in %s", mMcp)
logger.Infof("OK!\n")
exutil.By("Check that the image was pinned in all nodes in the pool")
for _, node := range allMasters {
ri := NewRemoteImage(node, pinnedImage)
logger.Infof("Checking %s", ri)
o.Expect(ri.IsPinned()).To(o.BeTrue(),
"%s is not pinned, but it should", ri)
}
logger.Infof("OK!\n")
exutil.By("Capture the current pull-secret value")
secretFile, err := getPullSecret(oc)
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the pull-secret")
logger.Debugf("Pull-secret content stored in file %s", secretFile)
defer func() {
logger.Infof("Restoring initial pull-secret value")
output, err := setDataForPullSecret(oc, secretFile)
if err != nil {
logger.Errorf("Error restoring the pull-secret's value. Error: %v\nOutput: %s", err, output)
}
wMcp.waitForComplete()
mMcp.waitForComplete()
}()
logger.Infof("OK!\n")
exutil.By("Set an empty pull-secret")
o.Expect(pullSecret.SetDataValue(".dockerconfigjson", "{}")).To(o.Succeed(),
"Error setting an empty pull-secret value")
mMcp.waitForComplete()
wMcp.waitForComplete()
logger.Infof("OK!\n")
// find the node with the machine-config-controller
exutil.By("Get the mcc node")
var mcc = NewController(oc.AsAdmin())
mccMaster, err := mcc.GetNode()
o.Expect(err).NotTo(o.HaveOccurred(), "Cannot get the node where the MCO controller is running")
logger.Infof("OK!\n")
// reboot the node with mcc
exutil.By("Reboot node")
startTime := mccMaster.GetDateOrFail()
o.Expect(mccMaster.Reboot()).To(o.Succeed(), "Error rebooting node %s", mccMaster)
logger.Infof("OK!\n")
// delete the pinnedImageSet
exutil.By("Delete the pinnedimageset")
o.Eventually(pis.Delete, "13m", "20s").ShouldNot(o.HaveOccurred(), "Error deleting pinnedimageset %s", pis)
logger.Infof("OK!\n")
// wait for the rebooted node
exutil.By("Wait for the rebooted node")
o.Eventually(mccMaster.GetUptime, "15m", "30s").Should(o.BeTemporally(">", startTime),
"%s was not properly rebooted", mccMaster)
mMcp.waitForComplete()
o.Expect(mMcp.waitForPinComplete(waitForPinned)).To(o.Succeed(), "Pinned image operation is not completed in %s", mMcp)
logger.Infof("OK!\n")
// check pinned imageset is deleted in all nodes in the pool
exutil.By("Check that the images are not pinned in all nodes in the pool")
for _, node := range allMasters {
ri := NewRemoteImage(node, pinnedImage)
logger.Infof("Checking %s", ri)
o.Eventually(ri.IsPinned, "5m", "20s").Should(o.BeFalse(),
"%s is pinned, but it should not", ri)
}
logger.Infof("OK!\n")
})
| |||||
test
|
openshift/openshift-tests-private
|
01c44e48-accf-4278-8cdf-1a7f2791916d
|
mco_scale
|
import (
"fmt"
"io"
"net/http"
"os"
"os/exec"
"path"
"strings"
"time"
"github.com/openshift/openshift-tests-private/test/extended/util/architecture"
"gopkg.in/ini.v1"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
"github.com/tidwall/gjson"
)
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_scale.go
|
package mco
import (
"fmt"
"io"
"net/http"
"os"
"os/exec"
"path"
"strings"
"time"
"github.com/openshift/openshift-tests-private/test/extended/util/architecture"
"gopkg.in/ini.v1"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
"github.com/tidwall/gjson"
)
const (
clonedPrefix = "user-data-"
)
var _ = g.Describe("[sig-mco] MCO scale", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("mco-scale", exutil.KubeConfigPath())
// worker MachineConfigPool
wMcp *MachineConfigPool
mMcp *MachineConfigPool
)
g.JustBeforeEach(func() {
// Skip if no machineset
skipTestIfWorkersCannotBeScaled(oc.AsAdmin())
wMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
mMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)
preChecks(oc)
})
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-High-63894-[P1] Scaleup using 4.1 cloud image[Disruptive]", func() {
var (
imageVersion = "4.1" // OCP4.1 ami for AWS and use-east2 zone: https://github.com/openshift/installer/blob/release-4.1/data/data/rhcos.json
numNewNodes = 1 // the number of nodes scaled up in the new Machineset
)
skipTestIfSupportedPlatformNotMatched(oc, AWSPlatform) // Scale up using 4.1 is only supported in AWS. GCP is only supported in versions 4.6+, and Vsphere in 4.2+
skipTestIfFIPSIsEnabled(oc.AsAdmin()) // fips was first supported in 4.3, hence scaling up 4.1 and 4.2 base images is not supported in clusters with fips=true
architecture.SkipNonAmd64SingleArch(oc) // arm64 is not supported until 4.12
// Apply workaround
// Because of https://issues.redhat.com/browse/OCPBUGS-27273 this test case fails when the cluster has imagecontentsourcepolicies
// In prow jobs, clusters have 2 imagecontentsourcepolicies (brew-registry and image-policy); we try to remove them to execute this test
// It only happens using 4.1 base images. The issue was fixed in 4.2
// For debugging purposes
oc.AsAdmin().Run("get").Args("ImageContentSourcePolicy").Execute()
oc.AsAdmin().Run("get").Args("ImageTagMirrorSet").Execute()
oc.AsAdmin().Run("get").Args("ImageDigestMirrorSet").Execute()
cleanedICSPs := []*Resource{NewResource(oc.AsAdmin(), "ImageContentSourcePolicy", "brew-registry"), NewResource(oc.AsAdmin(), "ImageContentSourcePolicy", "image-policy")}
logger.Warnf("APPLYING WORKAROUND FOR https://issues.redhat.com/browse/OCPBUGS-27273. Removing expected imageocontentsourcepolicies")
removedICSP := false
defer func() {
if removedICSP {
wMcp.waitForComplete()
mMcp.WaitImmediateForUpdatedStatus()
}
}()
for _, item := range cleanedICSPs {
icsp := item
if icsp.Exists() {
logger.Infof("Cleaning the spec of %s", icsp)
defer icsp.SetSpec(icsp.GetSpecOrFail())
o.Expect(icsp.SetSpec("{}")).To(o.Succeed(),
"Error cleaning %s spec", icsp)
removedICSP = true
}
}
if removedICSP {
wMcp.waitForComplete()
o.Expect(mMcp.WaitImmediateForUpdatedStatus()).To(o.Succeed())
} else {
logger.Infof("No ICSP was removed!!")
}
SimpleScaleUPTest(oc, wMcp, imageVersion, getUserDataIgnitionVersionFromOCPVersion(imageVersion), numNewNodes)
})
// 4.3 is the first image supporting fips
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Critical-77051-[P2] Scaleup using 4.3 cloud image[Disruptive]", func() {
var (
imageVersion = "4.3"
numNewNodes = 1 // the number of nodes scaled up in the new Machineset
)
skipTestIfSupportedPlatformNotMatched(oc, AWSPlatform, VspherePlatform) // Scale up using 4.3 is only supported in AWS, and Vsphere. GCP is only supported by our automation in versions 4.6+
architecture.SkipNonAmd64SingleArch(oc) // arm64 is not supported by OCP until 4.12
SimpleScaleUPTest(oc, wMcp, imageVersion, getUserDataIgnitionVersionFromOCPVersion(imageVersion), numNewNodes)
})
// 4.12 is the last version using rhel8, in 4.13 ocp starts using rhel9
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Critical-76471-[P1] Scaleup using 4.12 cloud image[Disruptive]", func() {
var (
imageVersion = "4.12"
numNewNodes = 1 // the number of nodes scaled up in the new Machineset
)
skipTestIfSupportedPlatformNotMatched(oc, AWSPlatform, GCPPlatform, VspherePlatform) // Scale up using 4.12 is only supported in AWS, GCP and Vsphere
SimpleScaleUPTest(oc, wMcp, imageVersion, getUserDataIgnitionVersionFromOCPVersion(imageVersion), numNewNodes)
})
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-High-52822-[P1] Create new config resources with 2.2.0 ignition boot image nodes [Disruptive]", func() {
var (
newMsName = "copied-machineset-modified-tc-52822"
kcName = "change-maxpods-kubelet-config"
kcTemplate = generateTemplateAbsolutePath(kcName + ".yaml")
crName = "change-ctr-cr-config"
crTemplate = generateTemplateAbsolutePath(crName + ".yaml")
mcName = "generic-config-file-test-52822"
mcpWorker = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
// Set the 4.5 boot image ami for east-2 zone.
// the right ami should be selected from here https://github.com/openshift/installer/blob/release-4.5/data/data/rhcos.json
imageVersion = "4.5"
numNewNodes = 1 // the number of nodes scaled up in the new Machineset
)
skipTestIfSupportedPlatformNotMatched(oc, AWSPlatform, VspherePlatform) // Scale up using 4.5 is only supported for AWS and Vsphere. GCP is only supported in versions 4.6+
architecture.SkipNonAmd64SingleArch(oc) // arm64 is not supported until 4.11
initialNumWorkers := len(wMcp.GetNodesOrFail())
defer func() {
logger.Infof("Start TC defer block")
newMs := NewMachineSet(oc.AsAdmin(), MachineAPINamespace, newMsName)
errors := o.InterceptGomegaFailures(func() { // We don't want gomega to fail and stop the deferred cleanup process
removeClonedMachineSet(newMs, wMcp, initialNumWorkers)
cr := NewContainerRuntimeConfig(oc.AsAdmin(), crName, crTemplate)
if cr.Exists() {
logger.Infof("Removing ContainerRuntimeConfig %s", cr.GetName())
o.Expect(cr.Delete()).To(o.Succeed(), "Error removing %s", cr)
}
kc := NewKubeletConfig(oc.AsAdmin(), kcName, kcTemplate)
if kc.Exists() {
logger.Infof("Removing KubeletConfig %s", kc.GetName())
o.Expect(kc.Delete()).To(o.Succeed(), "Error removing %s", kc)
}
// MachineConfig struct has not been refactored to compose the "Resource" struct
// so there is no "Exists" method available. Use it after refactoring MachineConfig
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker)
logger.Infof("Removing machineconfig %s", mcName)
mc.delete()
})
if len(errors) != 0 {
logger.Infof("There were errors restoring the original MachineSet resources in the cluster")
for _, e := range errors {
logger.Errorf(e)
}
}
logger.Infof("Waiting for worker pool to be updated")
mcpWorker.waitForComplete()
// We don't want the test to pass if there were errors while restoring the initial state
o.Expect(len(errors)).To(o.BeZero(),
"There were %d errors while recovering the cluster's initial state", len(errors))
logger.Infof("End TC defer block")
}()
// Duplicate an existing MachineSet
allMs, err := NewMachineSetList(oc, MachineAPINamespace).GetAll()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting a list of MachineSet resources")
ms := allMs[0]
newMs := cloneMachineSet(oc.AsAdmin(), ms, newMsName, imageVersion, getUserDataIgnitionVersionFromOCPVersion(imageVersion))
// KubeletConfig
exutil.By("Create KubeletConfig")
kc := NewKubeletConfig(oc.AsAdmin(), kcName, kcTemplate)
kc.create()
kc.waitUntilSuccess("10s")
logger.Infof("OK!\n")
// ContainterRuntimeConfig
exutil.By("Create ContainterRuntimeConfig")
cr := NewContainerRuntimeConfig(oc.AsAdmin(), crName, crTemplate)
cr.create()
cr.waitUntilSuccess("10s")
logger.Infof("OK!\n")
// Generic machineconfig
exutil.By("Create generic config file")
genericConfigFilePath := "/etc/test-52822"
genericConfig := "config content for test case 52822"
fileConfig := getURLEncodedFileConfig(genericConfigFilePath, genericConfig, "420")
template := NewMCOTemplate(oc, "generic-machine-config-template.yml")
errCreate := template.Create("-p", "NAME="+mcName, "-p", "POOL=worker", "-p", fmt.Sprintf("FILES=[%s]", fileConfig))
o.Expect(errCreate).NotTo(o.HaveOccurred(), "Error creating MachineConfig %s", mcName)
logger.Infof("OK!\n")
// Wait for all pools to apply the configs
exutil.By("Wait for worker MCP to be updated")
mcpWorker.waitForComplete()
logger.Infof("OK!\n")
// Scale up the MachineSet
exutil.By("Scale MachineSet up")
logger.Infof("Scaling up machineset %s", newMs.GetName())
scaleErr := newMs.ScaleTo(numNewNodes)
o.Expect(scaleErr).NotTo(o.HaveOccurred(), "Error scaling up MachineSet %s", newMs.GetName())
logger.Infof("Waiting %s machineset for being ready", newMsName)
o.Eventually(newMs.GetIsReady, "20m", "2m").Should(o.BeTrue(), "MachineSet %s is not ready", newMs.GetName())
logger.Infof("OK!\n")
exutil.By("Check that worker pool is increased and updated")
o.Eventually(wMcp.GetNodesOrFail, "5m", "30s").Should(o.HaveLen(initialNumWorkers+numNewNodes),
"The worker pool has not added the new nodes created by the new Machineset.\n%s", wMcp.PrettyString())
// Verify that the scaled nodes has been configured properly
exutil.By("Check config in the new node")
newNodes, nErr := newMs.GetNodes()
o.Expect(nErr).NotTo(o.HaveOccurred(), "Error getting the nodes created by MachineSet %s", newMs.GetName())
o.Expect(newNodes).To(o.HaveLen(numNewNodes), "Only %d nodes should have been created by MachineSet %s", numNewNodes, newMs.GetName())
newNode := newNodes[0]
logger.Infof("New node: %s", newNode.GetName())
logger.Infof("OK!\n")
exutil.By("Check kubelet config")
kcFile := NewRemoteFile(*newNode, "/etc/kubernetes/kubelet.conf")
kcrErr := kcFile.Fetch()
o.Expect(kcrErr).NotTo(o.HaveOccurred(), "Error reading kubelet config in node %s", newNode.GetName())
o.Expect(kcFile.GetTextContent()).Should(o.Or(o.ContainSubstring(`"maxPods": 500`), o.ContainSubstring(`maxPods: 500`)),
"File /etc/kubernetes/kubelet.conf has not the expected content")
logger.Infof("OK!\n")
exutil.By("Check container runtime config")
crFile := NewRemoteFile(*newNode, "/etc/containers/storage.conf")
crrErr := crFile.Fetch()
o.Expect(crrErr).NotTo(o.HaveOccurred(), "Error reading container runtime config in node %s", newNode.GetName())
o.Expect(crFile.GetTextContent()).Should(o.ContainSubstring("size = \"8G\""),
"File /etc/containers/storage.conf has not the expected content")
logger.Infof("OK!\n")
exutil.By("Check generic machine config")
cFile := NewRemoteFile(*newNode, genericConfigFilePath)
crErr := cFile.Fetch()
o.Expect(crErr).NotTo(o.HaveOccurred(), "Error reading generic config file in node %s", newNode.GetName())
o.Expect(cFile.GetTextContent()).Should(o.Equal(genericConfig),
"File %s has not the expected content", genericConfigFilePath)
logger.Infof("OK!\n")
exutil.By("Scale down and remove the cloned Machineset")
removeClonedMachineSet(newMs, wMcp, initialNumWorkers)
logger.Infof("OK!\n")
})
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-High-65923-SSH key in scaled clusters [Disruptive]", func() {
// It is a safe assumption that all the tested clusters will have an sshkey deployed in them.
// If at any moment this assumption is not safe anymore, we need to check whether the sshkey exists
// and create a MC to deploy an sshkey in case none is deployed
var (
initialNumWorkers = len(wMcp.GetNodesOrFail())
numNewNodes = 1
)
defer wMcp.waitForComplete()
exutil.By("Scale up a machineset")
allMs, err := NewMachineSetList(oc, MachineAPINamespace).GetAll()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting a list of MachineSet resources")
ms := allMs[0]
initialMsNodes, err := ms.GetNodes()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting a list of nodes that belong to machineset %s", ms.GetName())
initialNumMsNodes := len(initialMsNodes)
logger.Infof("Scaling up machineset %s by 1", ms.GetName())
defer func() { _ = ms.ScaleTo(initialNumMsNodes) }()
o.Expect(ms.ScaleTo(initialNumMsNodes+numNewNodes)).NotTo(
o.HaveOccurred(),
"Error scaling up MachineSet %s", ms.GetName())
logger.Infof("OK!\n")
logger.Infof("Waiting %s machineset for being ready", ms)
o.Eventually(ms.GetIsReady, "20m", "2m").Should(o.BeTrue(), "MachineSet %s is not ready", ms.GetName())
logger.Infof("OK!\n")
exutil.By("Check that worker pool is increased and updated")
o.Eventually(wMcp.GetNodesOrFail, "5m", "30s").Should(o.HaveLen(initialNumWorkers+numNewNodes),
"The worker pool has not added the new nodes created by the new Machineset.\n%s", wMcp.PrettyString())
wMcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Check that the sshkey exists in all nodes")
currentWorkers := wMcp.GetNodesOrFail()
for _, node := range currentWorkers {
logger.Infof("Checking sshkey in node %s", node.GetName())
remoteSSHKey := NewRemoteFile(node, "/home/core/.ssh/authorized_keys.d/ignition")
o.Expect(remoteSSHKey.Fetch()).To(o.Succeed(),
"Error getting the content of the sshkey file in node %s", node.GetName())
o.Expect(remoteSSHKey.GetTextContent()).NotTo(o.BeEmpty(),
"The sshkey file has no content in node %s", node.GetName())
logger.Infof("Sshkey is OK in node %s", node.GetName())
}
logger.Infof("OK!\n")
})
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-64623-[P1] Machine Config Server CA rotation. IPI. [Disruptive]", func() {
var (
initialNumWorkers = len(wMcp.GetNodesOrFail())
numNewNodes = 1
)
// skip the test if fips is not enabled
skipTestIfFIPSIsNotEnabled(oc)
defer wMcp.waitForComplete()
exutil.By("Rotate MCS certificate")
initialMCSPods, err := GetMCSPodNames(oc.AsAdmin())
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting MCS pod names")
logger.Infof("Current MCS pod names: %s", initialMCSPods)
o.Expect(
RotateMCSCertificates(oc.AsAdmin()),
// oc.AsAdmin().WithoutNamespace().Run("adm").Args("ocp-certificates", "regenerate-machine-config-server-serving-cert").Execute(),
).To(o.Succeed(),
"Error rotating MCS certificates")
logger.Infof("OK!\n")
exutil.By("Check that MCS pods were restarted")
o.Eventually(func(gm o.Gomega) {
// for debugging purposes
logger.Infof("Waiting for MCS pods to be restarted")
_ = oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", MachineConfigNamespace).Execute()
currentMCSPods, err := GetMCSPodNames(oc.AsAdmin())
gm.Expect(err).NotTo(o.HaveOccurred(),
"Error getting MCS pod names")
for _, initialMCSPod := range initialMCSPods {
gm.Expect(currentMCSPods).NotTo(o.ContainElement(initialMCSPod),
"MCS pod %s was not restarted after certs rotation", initialMCSPod)
}
}, "5m", "20s",
).Should(o.Succeed(),
"The MCS pods were not restarted after the MCS certificates were rotated")
logger.Infof("OK!\n")
exutil.By("Check that new machine-config-server-tls and machine-config-server-ca secrets are created")
tlsSecret := NewSecret(oc.AsAdmin(), MachineConfigNamespace, "machine-config-server-tls")
caSecret := NewSecret(oc.AsAdmin(), MachineConfigNamespace, "machine-config-server-ca")
o.Eventually(tlsSecret, "30s", "5s").Should(Exist(),
"%s secret does not exist in the MCO namespace after MCS cert rotations", tlsSecret.GetName())
o.Eventually(caSecret, "30s", "5s").Should(Exist(),
"%s secret does not exist in the MCO namespace after MCS cert rotations", tlsSecret.GetName())
logger.Infof("OK!\n")
exutil.By("Scale up a machineset")
allMs, err := NewMachineSetList(oc, MachineAPINamespace).GetAll()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting a list of MachineSet resources")
ms := allMs[0]
initialMsNodes, err := ms.GetNodes()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting a list of nodes that belong to machineset %s", ms.GetName())
initialNumMsNodes := len(initialMsNodes)
logger.Infof("Scaling up machineset %s by 1", ms.GetName())
defer func() { _ = ms.ScaleTo(initialNumMsNodes) }()
o.Expect(ms.ScaleTo(initialNumMsNodes+numNewNodes)).NotTo(
o.HaveOccurred(),
"Error scaling up MachineSet %s", ms.GetName())
logger.Infof("OK!\n")
logger.Infof("Waiting %s machineset for being ready", ms)
o.Eventually(ms.GetIsReady, "20m", "2m").Should(o.BeTrue(), "MachineSet %s is not ready", ms.GetName())
logger.Infof("OK!\n")
exutil.By("Check that worker pool is increased and updated")
o.Eventually(wMcp.GetNodesOrFail, "5m", "30s").Should(o.HaveLen(initialNumWorkers+numNewNodes),
"The worker pool has not added the new nodes created by the new Machineset.\n%s", wMcp.PrettyString())
wMcp.waitForComplete()
logger.Infof("All nodes are up and ready!")
logger.Infof("OK!\n")
})
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Medium-73636-[P2][OnCLayer] Pinned images in scaled nodes [Disruptive]", func() {
// The pinnedimageset feature is currently only supported in techpreview
skipIfNoTechPreview(oc.AsAdmin())
var (
waitForPinned = time.Minute * 5
initialNumWorkers = len(wMcp.GetNodesOrFail())
numNewNodes = 3
pinnedImageSetName = "tc-73636-pinned-images-scale"
pinnedImageName = BusyBoxImage
)
exutil.By("Pin images")
pis, err := CreateGenericPinnedImageSet(oc.AsAdmin(), pinnedImageSetName, wMcp.GetName(), []string{pinnedImageName})
defer pis.DeleteAndWait(waitForPinned)
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating pinnedimageset %s", pis)
logger.Infof("OK!\n")
exutil.By("Check that the pool is reporting the right pinnedimageset status")
o.Expect(wMcp.waitForPinComplete(waitForPinned)).To(o.Succeed(), "Pinned image operation is not completed in %s", wMcp)
logger.Infof("OK!\n")
exutil.By("Check that the image was pinned in all nodes")
for _, node := range wMcp.GetNodesOrFail() {
rmi := NewRemoteImage(node, pinnedImageName)
o.Expect(rmi.IsPinned()).To(o.BeTrue(), "%s is not pinned, but it should", rmi)
}
logger.Infof("OK!\n")
exutil.By("Scale up a machineset")
allMs, err := NewMachineSetList(oc, MachineAPINamespace).GetAll()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting a list of MachineSet resources")
ms := allMs[0]
initialNumMsNodes := len(ms.GetNodesOrFail())
logger.Infof("Scaling up machineset %s by %d", ms.GetName(), numNewNodes)
defer func() {
_ = ms.ScaleTo(initialNumMsNodes)
wMcp.waitForComplete()
}()
o.Expect(ms.ScaleTo(initialNumMsNodes+numNewNodes)).NotTo(
o.HaveOccurred(),
"Error scaling up MachineSet %s", ms.GetName())
logger.Infof("OK!\n")
exutil.By("Check that worker pool is increased and updated")
o.Eventually(wMcp.GetNodesOrFail, "5m", "30s").Should(o.HaveLen(initialNumWorkers+numNewNodes),
"The worker pool has not added the new nodes created by the new Machineset.\n%s", wMcp.PrettyString())
wMcp.waitForComplete()
logger.Infof("All nodes are up and ready!")
logger.Infof("OK!\n")
exutil.By("Check that the pool is reporting the right pinnedimageset status")
o.Expect(wMcp.waitForPinComplete(waitForPinned)).To(o.Succeed(), "Pinned image operation is not completed in %s", wMcp)
logger.Infof("OK!\n")
exutil.By("Check that the image was pinned in all nodes")
for _, node := range wMcp.GetNodesOrFail() {
rmi := NewRemoteImage(node, pinnedImageName)
o.Expect(rmi.IsPinned()).To(o.BeTrue(), "%s is not pinned, but it should", rmi)
}
logger.Infof("OK!\n")
})
})
func cloneMachineSet(oc *exutil.CLI, ms MachineSet, newMsName, imageVersion, ignitionVersion string) *MachineSet {
var (
newSecretName = getClonedSecretName(newMsName)
platform = exutil.CheckPlatform(oc.AsAdmin())
)
// Duplicate an existing MachineSet
exutil.By("Duplicate a MachineSet resource")
logger.Infof("Create a new machineset that will use base image %s and ignition version %s", imageVersion, ignitionVersion)
newMs, dErr := ms.Duplicate(newMsName)
o.Expect(dErr).NotTo(o.HaveOccurred(), "Error duplicating MachineSet %s -n %s", ms.GetName(), ms.GetNamespace())
logger.Infof("OK!\n")
// Create a new secret using the given ignition version
exutil.By(fmt.Sprintf("Create a new secret with %s ignition version", ignitionVersion))
currentSecret := ms.GetOrFail(`{.spec.template.spec.providerSpec.value.userDataSecret.name}`)
logger.Infof("Duplicating secret %s with new name %s", currentSecret, newSecretName)
clonedSecret, sErr := duplicateMachinesetSecret(oc, currentSecret, newSecretName, ignitionVersion)
o.Expect(sErr).NotTo(o.HaveOccurred(), "Error duplicating machine-api secret")
o.Expect(clonedSecret).To(Exist(), "The secret was not duplicated for machineset %s", newMs)
logger.Infof("OK!\n")
// Get the right base image name from the rhcos json info stored in the github repositories
exutil.By(fmt.Sprintf("Get the base image for version %s", imageVersion))
rhcosHandler, err := GetRHCOSHandler(platform)
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the rhcos handler")
architecture, err := GetArchitectureFromMachineset(&ms, platform)
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the arechitecture from machineset %s", ms.GetName())
baseImage, err := rhcosHandler.GetBaseImageFromRHCOSImageInfo(imageVersion, architecture, getCurrentRegionOrFail(oc.AsAdmin()))
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the base image")
logger.Infof("Using base image %s", baseImage)
baseImageURL, err := rhcosHandler.GetBaseImageURLFromRHCOSImageInfo(imageVersion, architecture)
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the base image URL")
// In vsphere we will upload the image. To avoid collisions we will add a prefix to identify our image
if platform == VspherePlatform {
baseImage = "mcotest-" + baseImage
}
o.Expect(
uploadBaseImageToCloud(oc, platform, baseImageURL, baseImage),
).To(o.Succeed(), "Error uploading the base image %s to the cloud", baseImageURL)
logger.Infof("OK!\n")
// Set the new boot base image
exutil.By(fmt.Sprintf("Configure the duplicated MachineSet to use the %s boot image", baseImage))
o.Expect(newMs.SetCoreOsBootImage(baseImage)).To(o.Succeed(),
"There was an error while patching the new base image in %s", newMs)
logger.Infof("OK!\n")
// Use new secret
exutil.By("Configure the duplicated MachineSet to use the new secret")
err = newMs.Patch("json", `[{ "op": "replace", "path": "/spec/template/spec/providerSpec/value/userDataSecret/name", "value": "`+newSecretName+`" }]`)
o.Expect(err).NotTo(o.HaveOccurred(), "Error patching MachineSet %s to use the new secret %s", newMs.GetName(), newSecretName)
logger.Infof("OK!\n")
return newMs
}
func removeClonedMachineSet(ms *MachineSet, mcp *MachineConfigPool, expectedNumWorkers int) {
if ms.Exists() {
logger.Infof("Scaling %s machineset to zero", ms.GetName())
o.Expect(ms.ScaleTo(0)).To(o.Succeed(),
"Error scaling MachineSet %s to 0", ms.GetName())
logger.Infof("Waiting %s machineset for being ready", ms.GetName())
o.Eventually(ms.GetIsReady, "2m", "15s").Should(o.BeTrue(), "MachineSet %s is not ready", ms.GetName())
logger.Infof("Removing %s machineset", ms.GetName())
o.Expect(ms.Delete()).To(o.Succeed(),
"Error deleting MachineSet %s", ms.GetName())
if expectedNumWorkers >= 0 {
exutil.By("Check that worker pool is increased and updated")
// Before calling mcp.GetNodes we wait for the MachineCount number to settle, to avoid a panic due to nodes disappearing while we calculate the number of nodes
o.Eventually(mcp.getMachineCount, "5m", "30s").Should(o.Equal(expectedNumWorkers),
"The MachineCount has not the expected value in pool:\n%s", mcp.PrettyString())
o.Eventually(mcp.GetNodes, "5m", "30s").Should(o.HaveLen(expectedNumWorkers),
"The number of nodes is not the expected one in pool:\n%s", mcp.PrettyString())
}
}
clonedSecret := NewSecret(ms.oc, MachineAPINamespace, getClonedSecretName(ms.GetName()))
if clonedSecret.Exists() {
logger.Infof("Removing %s secret", clonedSecret)
o.Expect(clonedSecret.Delete()).To(o.Succeed(),
"Error deleting %s", ms.GetName())
}
}
// getRHCOSImagesInfo returns a string with the info about all the base images used by rhcos in the given version
func getRHCOSImagesInfo(version string) (string, error) {
var (
err error
resp *http.Response
numRetries = 3
retryDelay = time.Minute
rhcosURL = fmt.Sprintf("https://raw.githubusercontent.com/openshift/installer/release-%s/data/data/rhcos.json", version)
)
if CompareVersions(version, ">=", "4.10") {
rhcosURL = fmt.Sprintf("https://raw.githubusercontent.com/openshift/installer/release-%s/data/data/coreos/rhcos.json", version)
}
// To mitigate network errors we will retry in case of failure
logger.Infof("Getting rhcos image info from: %s", rhcosURL)
for i := 0; i < numRetries; i++ {
if i > 0 {
logger.Infof("Error while getting the rhcos mages json data: %s.\nWaiting %s and retrying. Num retries: %d", err, retryDelay, i)
time.Sleep(retryDelay)
}
resp, err = http.Get(rhcosURL)
if err == nil {
break
}
}
if err != nil {
return "", err
}
defer resp.Body.Close()
// We read the response body
body, err := io.ReadAll(resp.Body)
if err != nil {
return "", err
}
return string(body), nil
}
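// Illustrative example of the URL selection above: getRHCOSImagesInfo("4.12") fetches
// https://raw.githubusercontent.com/openshift/installer/release-4.12/data/data/coreos/rhcos.json,
// while versions older than 4.10 fall back to the data/data/rhcos.json path.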
// convertArch transforms amd64 naming into x86_64 naming
func convertArch(arch architecture.Architecture) string {
stringArch := ""
switch arch {
case architecture.AMD64:
stringArch = "x86_64"
case architecture.ARM64:
stringArch = "aarch64"
default:
stringArch = arch.String()
}
return stringArch
}
// getCurrentRegionOrFail returns the current region if we are in AWS or an empty string if any other platform
func getCurrentRegionOrFail(oc *exutil.CLI) string {
infra := NewResource(oc.AsAdmin(), "infrastructure", "cluster")
return infra.GetOrFail(`{.status.platformStatus.aws.region}`)
}
// SimpleScaleUPTest is a generic function that tests scaling up and down worker nodes using the base image corresponding to the given version
func SimpleScaleUPTest(oc *exutil.CLI, mcp *MachineConfigPool, imageVersion, ignitionVersion string, numNewNodes int) {
var (
newMsName = fmt.Sprintf("mco-tc-%s-cloned", GetCurrentTestPolarionIDNumber())
initialNumWorkers = len(mcp.GetNodesOrFail())
)
defer func() {
logger.Infof("Start TC defer block")
newMs := NewMachineSet(oc.AsAdmin(), MachineAPINamespace, newMsName)
errors := o.InterceptGomegaFailures(func() { removeClonedMachineSet(newMs, mcp, initialNumWorkers) }) // We don't want gomega to fail and stop the deferred cleanup process
if len(errors) != 0 {
logger.Infof("There were errors restoring the original MachineSet resources in the cluster")
for _, e := range errors {
logger.Errorf(e)
}
}
// We don't want the test to pass if there were errors while restoring the initial state
o.Expect(len(errors)).To(o.BeZero(),
"There were %d errors while recovering the cluster's initial state", len(errors))
logger.Infof("End TC defer block")
}()
logger.Infof("Create a new MachineSet using the right base image")
allMs, err := NewMachineSetList(oc, MachineAPINamespace).GetAll()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting a list of MachineSet resources")
ms := allMs[0]
newMs := cloneMachineSet(oc.AsAdmin(), ms, newMsName, imageVersion, ignitionVersion)
exutil.By("Scale MachineSet up")
logger.Infof("Scaling up machineset %s", newMs.GetName())
scaleErr := newMs.ScaleTo(numNewNodes)
o.Expect(scaleErr).NotTo(o.HaveOccurred(), "Error scaling up MachineSet %s", newMs.GetName())
logger.Infof("Waiting %s machineset for being ready", newMsName)
o.Eventually(newMs.GetIsReady, "20m", "2m").Should(o.BeTrue(), "MachineSet %s is not ready", newMs.GetName())
logger.Infof("OK!\n")
exutil.By("Check that worker pool is increased and updated")
o.Eventually(mcp.GetNodesOrFail, "5m", "30s").Should(o.HaveLen(initialNumWorkers+numNewNodes),
"The worker pool has not added the new nodes created by the new Machineset.\n%s", mcp.PrettyString())
mcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Scale down and remove the cloned Machineset")
removeClonedMachineSet(newMs, mcp, initialNumWorkers)
logger.Infof("OK!\n")
}
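// getClonedSecretName returns the name used for the secret cloned for the given MachineSet name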
func getClonedSecretName(msName string) string {
return clonedPrefix + msName
}
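// GetRHCOSHandler returns the RHCOSHandler implementation corresponding to the given platform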
func GetRHCOSHandler(platform string) (RHCOSHandler, error) {
switch platform {
case AWSPlatform:
return AWSRHCOSHandler{}, nil
case GCPPlatform:
return GCPRHCOSHandler{}, nil
case VspherePlatform:
return VsphereRHCOSHandler{}, nil
default:
return nil, fmt.Errorf("Platform %s is not supported and cannot get RHCOSHandler", platform)
}
}
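// RHCOSHandler abstracts how the rhcos base boot image name and its download URL are obtained for each supported platform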
type RHCOSHandler interface {
GetBaseImageFromRHCOSImageInfo(version string, arch architecture.Architecture, region string) (string, error)
GetBaseImageURLFromRHCOSImageInfo(version string, arch architecture.Architecture) (string, error)
}
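// AWSRHCOSHandler implements RHCOSHandler for the AWS platform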
type AWSRHCOSHandler struct{}
func (aws AWSRHCOSHandler) GetBaseImageFromRHCOSImageInfo(version string, arch architecture.Architecture, region string) (string, error) {
var (
path string
stringArch = convertArch(arch)
platform = AWSPlatform
)
rhcosImageInfo, err := getRHCOSImagesInfo(version)
if err != nil {
return "", err
}
if region == "" {
return "", fmt.Errorf("Region cannot have an empty value when we try to get the base image in platform %s", platform)
}
if CompareVersions(version, "<", "4.10") {
path = `amis.` + region + `.hvm`
} else {
path = fmt.Sprintf("architectures.%s.images.%s.regions.%s.image", stringArch, platform, region)
}
logger.Infof("Looking for rhcos base image info in path %s", path)
baseImage := gjson.Get(rhcosImageInfo, path)
if !baseImage.Exists() {
logger.Infof("rhcos info:\n%s", rhcosImageInfo)
return "", fmt.Errorf("Could not find the base image for version <%s> in platform <%s> architecture <%s> and region <%s> with path %s",
version, platform, arch, region, path)
}
return baseImage.String(), nil
}
func (aws AWSRHCOSHandler) GetBaseImageURLFromRHCOSImageInfo(version string, arch architecture.Architecture) (string, error) {
return getBaseImageURLFromRHCOSImageInfo(version, "aws", "vmdk.gz", convertArch(arch))
}
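// GCPRHCOSHandler implements RHCOSHandler for the GCP platform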
type GCPRHCOSHandler struct{}
func (gcp GCPRHCOSHandler) GetBaseImageFromRHCOSImageInfo(version string, arch architecture.Architecture, region string) (string, error) {
var (
imagePath string
projectPath string
stringArch = convertArch(arch)
platform = GCPPlatform
)
if CompareVersions(version, "==", "4.1") {
return "", fmt.Errorf("There is no image base image supported for platform %s in version %s", platform, version)
}
rhcosImageInfo, err := getRHCOSImagesInfo(version)
if err != nil {
return "", err
}
if CompareVersions(version, "<", "4.10") {
imagePath = "gcp.image"
projectPath = "gcp.project"
} else {
imagePath = fmt.Sprintf("architectures.%s.images.%s.name", stringArch, platform)
projectPath = fmt.Sprintf("architectures.%s.images.%s.project", stringArch, platform)
}
logger.Infof("Looking for rhcos base image name in path %s", imagePath)
baseImage := gjson.Get(rhcosImageInfo, imagePath)
if !baseImage.Exists() {
logger.Infof("rhcos info:\n%s", rhcosImageInfo)
return "", fmt.Errorf("Could not find the base image for version <%s> in platform <%s> architecture <%s> and region <%s> with path %s",
version, platform, arch, region, imagePath)
}
logger.Infof("Looking for rhcos base image project in path %s", projectPath)
project := gjson.Get(rhcosImageInfo, projectPath)
if !project.Exists() {
logger.Infof("rhcos info:\n%s", rhcosImageInfo)
return "", fmt.Errorf("Could not find the project where the base image is stored with version <%s> in platform <%s> architecture <%s> and region <%s> with path %s",
version, platform, arch, region, projectPath)
}
return fmt.Sprintf("projects/%s/global/images/%s", project.String(), baseImage.String()), nil
}
func (gcp GCPRHCOSHandler) GetBaseImageURLFromRHCOSImageInfo(version string, arch architecture.Architecture) (string, error) {
return getBaseImageURLFromRHCOSImageInfo(version, "gcp", "tar.gz", convertArch(arch))
}
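// VsphereRHCOSHandler implements RHCOSHandler for the vSphere platform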
type VsphereRHCOSHandler struct{}
func (vsp VsphereRHCOSHandler) GetBaseImageFromRHCOSImageInfo(version string, arch architecture.Architecture, _ string) (string, error) {
baseImageURL, err := vsp.GetBaseImageURLFromRHCOSImageInfo(version, arch)
if err != nil {
return "", err
}
return path.Base(baseImageURL), nil
}
func (vsp VsphereRHCOSHandler) GetBaseImageURLFromRHCOSImageInfo(version string, arch architecture.Architecture) (string, error) {
return getBaseImageURLFromRHCOSImageInfo(version, "vmware", "ova", convertArch(arch))
}
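// getBaseImageURLFromRHCOSImageInfo extracts the download URL of the rhcos base image for the given version, platform, format and architecture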
func getBaseImageURLFromRHCOSImageInfo(version, platform, format, stringArch string) (string, error) {
var (
imagePath string
baseURIPath string
olderThan410 = CompareVersions(version, "<", "4.10")
)
rhcosImageInfo, err := getRHCOSImagesInfo(version)
if err != nil {
return "", err
}
if olderThan410 {
imagePath = fmt.Sprintf("images.%s.path", platform)
baseURIPath = "baseURI"
} else {
imagePath = fmt.Sprintf("architectures.%s.artifacts.%s.formats.%s.disk.location", stringArch, platform, strings.ReplaceAll(format, ".", `\.`))
}
logger.Infof("Looking for rhcos base image path name in path %s", imagePath)
baseImageURL := gjson.Get(rhcosImageInfo, imagePath)
if !baseImageURL.Exists() {
logger.Infof("rhcos info:\n%s", rhcosImageInfo)
return "", fmt.Errorf("Could not find the base image for version <%s> in platform <%s> architecture <%s> and format <%s> with path %s",
version, platform, stringArch, format, imagePath)
}
if !olderThan410 {
return baseImageURL.String(), nil
}
logger.Infof("Looking for baseURL in path %s", baseURIPath)
baseURI := gjson.Get(rhcosImageInfo, baseURIPath)
if !baseURI.Exists() {
logger.Infof("rhcos info:\n%s", rhcosImageInfo)
return "", fmt.Errorf("Could not find the base URI with version <%s> in platform <%s> architecture <%s> and format <%s> with path %s",
version, platform, stringArch, format, baseURIPath)
}
return fmt.Sprintf("%s/%s", strings.Replace(strings.Trim(baseURI.String(), "/"), "releases-art-rhcos.svc.ci.openshift.org", "rhcos.mirror.openshift.com", 1), strings.Trim(baseImageURL.String(), "/")), nil
}
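// uploadBaseImageToCloud uploads the given base image to the cloud when the platform requires it. Currently only vSphere needs the upload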
func uploadBaseImageToCloud(oc *exutil.CLI, platform, baseImageURL, baseImage string) error {
switch platform {
case AWSPlatform:
logger.Infof("No need to updload images in AWS")
return nil
case GCPPlatform:
logger.Infof("No need to updload images in GCP")
return nil
case VspherePlatform:
server, dataCenter, dataStore, resourcePool, user, password, err := getvSphereCredentials(oc.AsAdmin())
if err != nil {
return err
}
err = uploadBaseImageToVsphere(baseImageURL, baseImage, server, dataCenter, dataStore, resourcePool, user, password)
if err != nil {
return err
}
return nil
default:
return fmt.Errorf("Platform %s is not supported, base image cannot be updloaded", platform)
}
}
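// uploadBaseImageToVsphere imports the OVA base image into vSphere using govc, upgrades the VM's hardware and marks the VM as a template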
func uploadBaseImageToVsphere(baseImageSrc, baseImageDest, server, dataCenter, dataStore, resourcePool, user, password string) error {
var (
execBin = "govc"
uploadCommand = []string{"import.ova", "--debug", "--name", baseImageDest, baseImageSrc}
upgradeHWCommand = []string{"vm.upgrade", "-vm", baseImageDest}
templateCommand = []string{"vm.markastemplate", baseImageDest}
govcEnv = []string{
"GOVC_URL=" + server,
"GOVC_USERNAME=" + user,
"GOVC_PASSWORD=" + password,
"GOVC_DATASTORE=" + dataStore,
"GOVC_RESOURCE_POOL=" + resourcePool,
"GOVC_DATACENTER=" + dataCenter,
"GOVC_INSECURE=true",
}
)
logger.Infof("Uploading base image %s to vsphere with name %s", baseImageSrc, baseImageDest)
logger.Infof("%s %s", execBin, uploadCommand)
uploadCmd := exec.Command(execBin, uploadCommand...)
originalEnv := os.Environ()
// In prow the GOVC_TLS_CA_CERTS variable is not correctly set and it makes the govc command fail,
// so we remove this variable from the environment
var execEnv []string
for _, envVar := range originalEnv {
if strings.HasPrefix(envVar, "GOVC_TLS_CA_CERTS=") {
continue
}
execEnv = append(execEnv, envVar)
}
execEnv = append(execEnv, govcEnv...)
uploadCmd.Env = execEnv
out, err := uploadCmd.CombinedOutput()
logger.Infof(string(out))
if err != nil {
if strings.Contains(string(out), "already exists") {
logger.Infof("Image %s already exists in the cloud, we don't upload it again", baseImageDest)
} else {
return err
}
}
logger.Infof("Upgrading VM's hardware")
logger.Infof("%s %s", execBin, upgradeHWCommand)
upgradeCmd := exec.Command(execBin, upgradeHWCommand...)
upgradeCmd.Env = execEnv
out, err = upgradeCmd.CombinedOutput()
logger.Infof(string(out))
if err != nil {
// We don't fail. We log a warning and continue.
logger.Warnf("ERROR UPGRADING HARDWARE: %s", err)
}
logger.Infof("Transforming VM into template")
logger.Infof("%s %s", execBin, templateCommand)
templateCmd := exec.Command(execBin, templateCommand...)
templateCmd.Env = execEnv
out, err = templateCmd.CombinedOutput()
logger.Infof(string(out))
if err != nil {
// We don't fail. We log a warning and continue.
logger.Warnf("ERROR CONVERTING INTO TEMPLATE: %s", err)
}
return nil
}
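// getvSphereCredentials extracts the vSphere connection information from the cloud-provider-config ConfigMap and the vsphere-creds secret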
func getvSphereCredentials(oc *exutil.CLI) (server, dataCenter, dataStore, resourcePool, user, password string, err error) {
var (
configCM = NewConfigMap(oc.AsAdmin(), "openshift-config", "cloud-provider-config")
credsSecret = NewSecret(oc.AsAdmin(), "kube-system", "vsphere-creds")
)
config, err := configCM.GetDataValue("config")
if err != nil {
return
}
cfg, err := ini.Load(strings.NewReader(config))
if err == nil {
logger.Infof("%s config info is in ini fomart. Extracting data", configCM)
server = cfg.Section("Workspace").Key("server").String()
dataCenter = cfg.Section("Workspace").Key("datacenter").String()
dataStore = cfg.Section("Workspace").Key("default-datastore").String()
resourcePool = cfg.Section("Workspace").Key("resourcepool-path").String()
} else {
logger.Infof("%s config info is NOT in ini fomart. Trying to extract the information from the infrastructure resource", configCM)
infra := NewResource(oc.AsAdmin(), "infrastructure", "cluster")
var failureDomain string
failureDomain, err = infra.Get(`{.spec.platformSpec.vsphere.failureDomains[0]}`)
if err != nil {
logger.Errorf("Cannot get the failureDomain from the infrastructure resource: %s", err)
return
}
if failureDomain == "" {
logger.Errorf("Failure domain is empty in the infrastructure resource: %s\n%s", err, infra.PrettyString())
err = fmt.Errorf("Empty failure domain in the infrastructure resource")
return
}
gserver := gjson.Get(failureDomain, "server")
if gserver.Exists() {
server = gserver.String()
} else {
err = fmt.Errorf("Cannot get the server value from failureDomain\n%s", infra.PrettyString())
return
}
gdataCenter := gjson.Get(failureDomain, "topology.datacenter")
if gdataCenter.Exists() {
dataCenter = gdataCenter.String()
} else {
err = fmt.Errorf("Cannot get the data center value from failureDomain\n%s", infra.PrettyString())
return
}
gdataStore := gjson.Get(failureDomain, "topology.datastore")
if gdataStore.Exists() {
dataStore = gdataStore.String()
} else {
err = fmt.Errorf("Cannot get the data store value from failureDomain\n%s", infra.PrettyString())
return
}
gresourcePool := gjson.Get(failureDomain, "topology.resourcePool")
if gresourcePool.Exists() {
resourcePool = gresourcePool.String()
} else {
err = fmt.Errorf("Cannot get the resourcepool value from failureDomain\n%s", infra.PrettyString())
return
}
}
decodedData, err := credsSecret.GetDecodedDataMap()
if err != nil {
return
}
for k, v := range decodedData {
item := v
if strings.Contains(k, "username") {
user = item
}
if strings.Contains(k, "password") {
password = item
}
}
if user == "" {
logger.Errorf("Empty vsphere user")
err = fmt.Errorf("The vsphere user is empty")
return
}
if password == "" {
logger.Errorf("Empty vsphere password")
err = fmt.Errorf("The vsphere password is empty")
return
}
return
}
|
package mco
| ||||
function
|
openshift/openshift-tests-private
|
5ea2513c-d855-4851-8504-30e4b0f4b130
|
cloneMachineSet
|
['"fmt"', '"path"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_scale.go
|
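// cloneMachineSet duplicates the given MachineSet and configures the clone to use the base image for the given version and a secret with the given ignition version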
func cloneMachineSet(oc *exutil.CLI, ms MachineSet, newMsName, imageVersion, ignitionVersion string) *MachineSet {
var (
newSecretName = getClonedSecretName(newMsName)
platform = exutil.CheckPlatform(oc.AsAdmin())
)
// Duplicate an existing MachineSet
exutil.By("Duplicate a MachineSet resource")
logger.Infof("Create a new machineset that will use base image %s and ignition version %s", imageVersion, ignitionVersion)
newMs, dErr := ms.Duplicate(newMsName)
o.Expect(dErr).NotTo(o.HaveOccurred(), "Error duplicating MachineSet %s -n %s", ms.GetName(), ms.GetNamespace())
logger.Infof("OK!\n")
// Create a new secret using the given ignition version
exutil.By(fmt.Sprintf("Create a new secret with %s ignition version", ignitionVersion))
currentSecret := ms.GetOrFail(`{.spec.template.spec.providerSpec.value.userDataSecret.name}`)
logger.Infof("Duplicating secret %s with new name %s", currentSecret, newSecretName)
clonedSecret, sErr := duplicateMachinesetSecret(oc, currentSecret, newSecretName, ignitionVersion)
o.Expect(sErr).NotTo(o.HaveOccurred(), "Error duplicating machine-api secret")
o.Expect(clonedSecret).To(Exist(), "The secret was not duplicated for machineset %s", newMs)
logger.Infof("OK!\n")
// Get the right base image name from the rhcos json info stored in the github repositories
exutil.By(fmt.Sprintf("Get the base image for version %s", imageVersion))
rhcosHandler, err := GetRHCOSHandler(platform)
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the rhcos handler")
architecture, err := GetArchitectureFromMachineset(&ms, platform)
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the arechitecture from machineset %s", ms.GetName())
baseImage, err := rhcosHandler.GetBaseImageFromRHCOSImageInfo(imageVersion, architecture, getCurrentRegionOrFail(oc.AsAdmin()))
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the base image")
logger.Infof("Using base image %s", baseImage)
baseImageURL, err := rhcosHandler.GetBaseImageURLFromRHCOSImageInfo(imageVersion, architecture)
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the base image URL")
// In vSphere we will upload the image. To avoid collisions we will add a prefix to identify our image
if platform == VspherePlatform {
baseImage = "mcotest-" + baseImage
}
o.Expect(
uploadBaseImageToCloud(oc, platform, baseImageURL, baseImage),
).To(o.Succeed(), "Error uploading the base image %s to the cloud", baseImageURL)
logger.Infof("OK!\n")
// Set the new boot base image
exutil.By(fmt.Sprintf("Configure the duplicated MachineSet to use the %s boot image", baseImage))
o.Expect(newMs.SetCoreOsBootImage(baseImage)).To(o.Succeed(),
"There was an error while patching the new base image in %s", newMs)
logger.Infof("OK!\n")
// Use new secret
exutil.By("Configure the duplicated MachineSet to use the new secret")
err = newMs.Patch("json", `[{ "op": "replace", "path": "/spec/template/spec/providerSpec/value/userDataSecret/name", "value": "`+newSecretName+`" }]`)
o.Expect(err).NotTo(o.HaveOccurred(), "Error patching MachineSet %s to use the new secret %s", newMs.GetName(), newSecretName)
logger.Infof("OK!\n")
return newMs
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
f9381b18-bfe8-4ddc-88f1-8ed4fa8cfd04
|
removeClonedMachineSet
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_scale.go
|
func removeClonedMachineSet(ms *MachineSet, mcp *MachineConfigPool, expectedNumWorkers int) {
if ms.Exists() {
logger.Infof("Scaling %s machineset to zero", ms.GetName())
o.Expect(ms.ScaleTo(0)).To(o.Succeed(),
"Error scaling MachineSet %s to 0", ms.GetName())
logger.Infof("Waiting %s machineset for being ready", ms.GetName())
o.Eventually(ms.GetIsReady, "2m", "15s").Should(o.BeTrue(), "MachineSet %s is not ready", ms.GetName())
logger.Infof("Removing %s machineset", ms.GetName())
o.Expect(ms.Delete()).To(o.Succeed(),
"Error deleting MachineSet %s", ms.GetName())
if expectedNumWorkers >= 0 {
exutil.By("Check that worker pool is increased and updated")
// Before calling mcp.GetNodes we wait for the MachineCount number to settle, to avoid a panic due to nodes disappearing while we calculate the number of nodes
o.Eventually(mcp.getMachineCount, "5m", "30s").Should(o.Equal(expectedNumWorkers),
"The MachineCount has not the expected value in pool:\n%s", mcp.PrettyString())
o.Eventually(mcp.GetNodes, "5m", "30s").Should(o.HaveLen(expectedNumWorkers),
"The number of nodes is not the expected one in pool:\n%s", mcp.PrettyString())
}
}
clonedSecret := NewSecret(ms.oc, MachineAPINamespace, getClonedSecretName(ms.GetName()))
if clonedSecret.Exists() {
logger.Infof("Removing %s secret", clonedSecret)
o.Expect(clonedSecret.Delete()).To(o.Succeed(),
"Error deleting %s", ms.GetName())
}
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
70878455-4902-46db-9835-f73254163356
|
getRHCOSImagesInfo
|
['"fmt"', '"io"', '"net/http"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_scale.go
|
func getRHCOSImagesInfo(version string) (string, error) {
var (
err error
resp *http.Response
numRetries = 3
retryDelay = time.Minute
rhcosURL = fmt.Sprintf("https://raw.githubusercontent.com/openshift/installer/release-%s/data/data/rhcos.json", version)
)
if CompareVersions(version, ">=", "4.10") {
rhcosURL = fmt.Sprintf("https://raw.githubusercontent.com/openshift/installer/release-%s/data/data/coreos/rhcos.json", version)
}
// To mitigate network errors we will retry in case of failure
logger.Infof("Getting rhcos image info from: %s", rhcosURL)
for i := 0; i < numRetries; i++ {
if i > 0 {
logger.Infof("Error while getting the rhcos mages json data: %s.\nWaiting %s and retrying. Num retries: %d", err, retryDelay, i)
time.Sleep(retryDelay)
}
resp, err = http.Get(rhcosURL)
if err == nil {
break
}
}
if err != nil {
return "", err
}
defer resp.Body.Close()
// Read the response body
body, err := io.ReadAll(resp.Body)
if err != nil {
return "", err
}
return string(body), nil
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
bf424ad0-1e9b-4c1b-aa5d-e97aeb353aff
|
convertArch
|
['"github.com/openshift/openshift-tests-private/test/extended/util/architecture"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_scale.go
|
func convertArch(arch architecture.Architecture) string {
stringArch := ""
switch arch {
case architecture.AMD64:
stringArch = "x86_64"
case architecture.ARM64:
stringArch = "aarch64"
default:
stringArch = arch.String()
}
return stringArch
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
2149a6c9-7dc6-4982-9179-577b83220675
|
getCurrentRegionOrFail
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_scale.go
|
func getCurrentRegionOrFail(oc *exutil.CLI) string {
infra := NewResource(oc.AsAdmin(), "infrastructure", "cluster")
return infra.GetOrFail(`{.status.platformStatus.aws.region}`)
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
d1fa86ed-d11d-483b-9824-086bb642d574
|
SimpleScaleUPTest
|
['"fmt"', 'o "github.com/onsi/gomega"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_scale.go
|
func SimpleScaleUPTest(oc *exutil.CLI, mcp *MachineConfigPool, imageVersion, ignitionVersion string, numNewNodes int) {
var (
newMsName = fmt.Sprintf("mco-tc-%s-cloned", GetCurrentTestPolarionIDNumber())
initialNumWorkers = len(mcp.GetNodesOrFail())
)
defer func() {
logger.Infof("Start TC defer block")
newMs := NewMachineSet(oc.AsAdmin(), MachineAPINamespace, newMsName)
errors := o.InterceptGomegaFailures(func() { removeClonedMachineSet(newMs, mcp, initialNumWorkers) }) // We don't want gomega to fail and stop the deferred cleanup process
if len(errors) != 0 {
logger.Infof("There were errors restoring the original MachineSet resources in the cluster")
for _, e := range errors {
logger.Errorf(e)
}
}
// We don't want the test to pass if there were errors while restoring the initial state
o.Expect(len(errors)).To(o.BeZero(),
"There were %d errors while recovering the cluster's initial state", len(errors))
logger.Infof("End TC defer block")
}()
logger.Infof("Create a new MachineSet using the right base image")
allMs, err := NewMachineSetList(oc, MachineAPINamespace).GetAll()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting a list of MachineSet resources")
ms := allMs[0]
newMs := cloneMachineSet(oc.AsAdmin(), ms, newMsName, imageVersion, ignitionVersion)
exutil.By("Scale MachineSet up")
logger.Infof("Scaling up machineset %s", newMs.GetName())
scaleErr := newMs.ScaleTo(numNewNodes)
o.Expect(scaleErr).NotTo(o.HaveOccurred(), "Error scaling up MachineSet %s", newMs.GetName())
logger.Infof("Waiting %s machineset for being ready", newMsName)
o.Eventually(newMs.GetIsReady, "20m", "2m").Should(o.BeTrue(), "MachineSet %s is not ready", newMs.GetName())
logger.Infof("OK!\n")
exutil.By("Check that worker pool is increased and updated")
o.Eventually(mcp.GetNodesOrFail, "5m", "30s").Should(o.HaveLen(initialNumWorkers+numNewNodes),
"The worker pool has not added the new nodes created by the new Machineset.\n%s", mcp.PrettyString())
mcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Scale down and remove the cloned Machineset")
removeClonedMachineSet(newMs, mcp, initialNumWorkers)
logger.Infof("OK!\n")
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
53cfb70d-4007-4929-85c8-931541ffc906
|
getClonedSecretName
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_scale.go
|
func getClonedSecretName(msName string) string {
return clonedPrefix + msName
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
19a01810-c2bb-42bd-b875-6ba5302b006c
|
GetRHCOSHandler
|
['"fmt"']
|
['AWSRHCOSHandler', 'GCPRHCOSHandler', 'VsphereRHCOSHandler']
|
['RHCOSHandler']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_scale.go
|
func GetRHCOSHandler(platform string) (RHCOSHandler, error) {
switch platform {
case AWSPlatform:
return AWSRHCOSHandler{}, nil
case GCPPlatform:
return GCPRHCOSHandler{}, nil
case VspherePlatform:
return VsphereRHCOSHandler{}, nil
default:
return nil, fmt.Errorf("Platform %s is not supported and cannot get RHCOSHandler", platform)
}
}
|
mco
| ||
function
|
openshift/openshift-tests-private
|
27db616c-a2e0-4b71-9fd3-c37263a69904
|
GetBaseImageFromRHCOSImageInfo
|
['"fmt"', '"path"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"', '"github.com/tidwall/gjson"']
|
['AWSRHCOSHandler']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_scale.go
|
func (aws AWSRHCOSHandler) GetBaseImageFromRHCOSImageInfo(version string, arch architecture.Architecture, region string) (string, error) {
var (
path string
stringArch = convertArch(arch)
platform = AWSPlatform
)
rhcosImageInfo, err := getRHCOSImagesInfo(version)
if err != nil {
return "", err
}
if region == "" {
return "", fmt.Errorf("Region cannot have an empty value when we try to get the base image in platform %s", platform)
}
if CompareVersions(version, "<", "4.10") {
path = `amis.` + region + `.hvm`
} else {
path = fmt.Sprintf("architectures.%s.images.%s.regions.%s.image", stringArch, platform, region)
}
logger.Infof("Looking for rhcos base image info in path %s", path)
baseImage := gjson.Get(rhcosImageInfo, path)
if !baseImage.Exists() {
logger.Infof("rhcos info:\n%s", rhcosImageInfo)
return "", fmt.Errorf("Could not find the base image for version <%s> in platform <%s> architecture <%s> and region <%s> with path %s",
version, platform, arch, region, path)
}
return baseImage.String(), nil
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
ca8ac1bd-5f8c-4d92-8296-7bddf655a8ab
|
GetBaseImageURLFromRHCOSImageInfo
|
['"github.com/openshift/openshift-tests-private/test/extended/util/architecture"']
|
['AWSRHCOSHandler']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_scale.go
|
func (aws AWSRHCOSHandler) GetBaseImageURLFromRHCOSImageInfo(version string, arch architecture.Architecture) (string, error) {
return getBaseImageURLFromRHCOSImageInfo(version, "aws", "vmdk.gz", convertArch(arch))
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
8a15d537-1de5-4e20-8f37-1b3e6ac92d42
|
GetBaseImageFromRHCOSImageInfo
|
['"fmt"', '"path"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"', '"github.com/tidwall/gjson"']
|
['GCPRHCOSHandler']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_scale.go
|
func (gcp GCPRHCOSHandler) GetBaseImageFromRHCOSImageInfo(version string, arch architecture.Architecture, region string) (string, error) {
var (
imagePath string
projectPath string
stringArch = convertArch(arch)
platform = GCPPlatform
)
if CompareVersions(version, "==", "4.1") {
return "", fmt.Errorf("There is no image base image supported for platform %s in version %s", platform, version)
}
rhcosImageInfo, err := getRHCOSImagesInfo(version)
if err != nil {
return "", err
}
if CompareVersions(version, "<", "4.10") {
imagePath = "gcp.image"
projectPath = "gcp.project"
} else {
imagePath = fmt.Sprintf("architectures.%s.images.%s.name", stringArch, platform)
projectPath = fmt.Sprintf("architectures.%s.images.%s.project", stringArch, platform)
}
logger.Infof("Looking for rhcos base image name in path %s", imagePath)
baseImage := gjson.Get(rhcosImageInfo, imagePath)
if !baseImage.Exists() {
logger.Infof("rhcos info:\n%s", rhcosImageInfo)
return "", fmt.Errorf("Could not find the base image for version <%s> in platform <%s> architecture <%s> and region <%s> with path %s",
version, platform, arch, region, imagePath)
}
logger.Infof("Looking for rhcos base image project in path %s", projectPath)
project := gjson.Get(rhcosImageInfo, projectPath)
if !project.Exists() {
logger.Infof("rhcos info:\n%s", rhcosImageInfo)
return "", fmt.Errorf("Could not find the project where the base image is stored with version <%s> in platform <%s> architecture <%s> and region <%s> with path %s",
version, platform, arch, region, projectPath)
}
return fmt.Sprintf("projects/%s/global/images/%s", project.String(), baseImage.String()), nil
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
5efb22bd-1c34-4832-a8c4-07142736f324
|
GetBaseImageURLFromRHCOSImageInfo
|
['"github.com/openshift/openshift-tests-private/test/extended/util/architecture"']
|
['GCPRHCOSHandler']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_scale.go
|
func (gcp GCPRHCOSHandler) GetBaseImageURLFromRHCOSImageInfo(version string, arch architecture.Architecture) (string, error) {
return getBaseImageURLFromRHCOSImageInfo(version, "gcp", "tar.gz", convertArch(arch))
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
27405b9b-aef8-4813-97bc-cade8f1aaecd
|
GetBaseImageFromRHCOSImageInfo
|
['"path"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"']
|
['VsphereRHCOSHandler']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_scale.go
|
func (vsp VsphereRHCOSHandler) GetBaseImageFromRHCOSImageInfo(version string, arch architecture.Architecture, _ string) (string, error) {
baseImageURL, err := vsp.GetBaseImageURLFromRHCOSImageInfo(version, arch)
if err != nil {
return "", err
}
return path.Base(baseImageURL), nil
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
ffdb4efa-e389-4bd6-bcb1-e0ed74540bb4
|
GetBaseImageURLFromRHCOSImageInfo
|
['"github.com/openshift/openshift-tests-private/test/extended/util/architecture"']
|
['VsphereRHCOSHandler']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_scale.go
|
func (vsp VsphereRHCOSHandler) GetBaseImageURLFromRHCOSImageInfo(version string, arch architecture.Architecture) (string, error) {
return getBaseImageURLFromRHCOSImageInfo(version, "vmware", "ova", convertArch(arch))
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
abc66e1f-63b6-450b-ba9b-9f1f4e139cf1
|
getBaseImageURLFromRHCOSImageInfo
|
['"fmt"', '"path"', '"strings"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"', '"github.com/tidwall/gjson"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_scale.go
|
func getBaseImageURLFromRHCOSImageInfo(version, platform, format, stringArch string) (string, error) {
var (
imagePath string
baseURIPath string
olderThan410 = CompareVersions(version, "<", "4.10")
)
rhcosImageInfo, err := getRHCOSImagesInfo(version)
if err != nil {
return "", err
}
if olderThan410 {
imagePath = fmt.Sprintf("images.%s.path", platform)
baseURIPath = "baseURI"
} else {
imagePath = fmt.Sprintf("architectures.%s.artifacts.%s.formats.%s.disk.location", stringArch, platform, strings.ReplaceAll(format, ".", `\.`))
}
logger.Infof("Looking for rhcos base image path name in path %s", imagePath)
baseImageURL := gjson.Get(rhcosImageInfo, imagePath)
if !baseImageURL.Exists() {
logger.Infof("rhcos info:\n%s", rhcosImageInfo)
return "", fmt.Errorf("Could not find the base image for version <%s> in platform <%s> architecture <%s> and format <%s> with path %s",
version, platform, stringArch, format, imagePath)
}
if !olderThan410 {
return baseImageURL.String(), nil
}
logger.Infof("Looking for baseURL in path %s", baseURIPath)
baseURI := gjson.Get(rhcosImageInfo, baseURIPath)
if !baseURI.Exists() {
logger.Infof("rhcos info:\n%s", rhcosImageInfo)
return "", fmt.Errorf("Could not find the base URI with version <%s> in platform <%s> architecture <%s> and format <%s> with path %s",
version, platform, stringArch, format, baseURIPath)
}
return fmt.Sprintf("%s/%s", strings.Replace(strings.Trim(baseURI.String(), "/"), "releases-art-rhcos.svc.ci.openshift.org", "rhcos.mirror.openshift.com", 1), strings.Trim(baseImageURL.String(), "/")), nil
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
d8ec3b14-d73c-4d68-a052-239193894ee4
|
uploadBaseImageToCloud
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_scale.go
|
func uploadBaseImageToCloud(oc *exutil.CLI, platform, baseImageURL, baseImage string) error {
switch platform {
case AWSPlatform:
logger.Infof("No need to updload images in AWS")
return nil
case GCPPlatform:
logger.Infof("No need to updload images in GCP")
return nil
case VspherePlatform:
server, dataCenter, dataStore, resourcePool, user, password, err := getvSphereCredentials(oc.AsAdmin())
if err != nil {
return err
}
err = uploadBaseImageToVsphere(baseImageURL, baseImage, server, dataCenter, dataStore, resourcePool, user, password)
if err != nil {
return err
}
return nil
default:
return fmt.Errorf("Platform %s is not supported, base image cannot be updloaded", platform)
}
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
9f27b0c1-dbda-4770-b6bf-aac5fbb36c94
|
uploadBaseImageToVsphere
|
['"os"', '"os/exec"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_scale.go
|
func uploadBaseImageToVsphere(baseImageSrc, baseImageDest, server, dataCenter, dataStore, resourcePool, user, password string) error {
var (
execBin = "govc"
uploadCommand = []string{"import.ova", "--debug", "--name", baseImageDest, baseImageSrc}
upgradeHWCommand = []string{"vm.upgrade", "-vm", baseImageDest}
templateCommand = []string{"vm.markastemplate", baseImageDest}
govcEnv = []string{
"GOVC_URL=" + server,
"GOVC_USERNAME=" + user,
"GOVC_PASSWORD=" + password,
"GOVC_DATASTORE=" + dataStore,
"GOVC_RESOURCE_POOL=" + resourcePool,
"GOVC_DATACENTER=" + dataCenter,
"GOVC_INSECURE=true",
}
)
logger.Infof("Uploading base image %s to vsphere with name %s", baseImageSrc, baseImageDest)
logger.Infof("%s %s", execBin, uploadCommand)
uploadCmd := exec.Command(execBin, uploadCommand...)
originalEnv := os.Environ()
// In prow the GOVC_TLS_CA_CERTS variable is not correctly set and it makes the govc command fail,
// so we remove this variable from the environment
var execEnv []string
for _, envVar := range originalEnv {
if strings.HasPrefix(envVar, "GOVC_TLS_CA_CERTS=") {
continue
}
execEnv = append(execEnv, envVar)
}
execEnv = append(execEnv, govcEnv...)
uploadCmd.Env = execEnv
out, err := uploadCmd.CombinedOutput()
logger.Infof(string(out))
if err != nil {
if strings.Contains(string(out), "already exists") {
logger.Infof("Image %s already exists in the cloud, we don't upload it again", baseImageDest)
} else {
return err
}
}
logger.Infof("Upgrading VM's hardware")
logger.Infof("%s %s", execBin, upgradeHWCommand)
upgradeCmd := exec.Command(execBin, upgradeHWCommand...)
upgradeCmd.Env = execEnv
out, err = upgradeCmd.CombinedOutput()
logger.Infof(string(out))
if err != nil {
// We don't fail. We log a warning and continue.
logger.Warnf("ERROR UPGRADING HARDWARE: %s", err)
}
logger.Infof("Transforming VM into template")
logger.Infof("%s %s", execBin, templateCommand)
templateCmd := exec.Command(execBin, templateCommand...)
templateCmd.Env = execEnv
out, err = templateCmd.CombinedOutput()
logger.Infof(string(out))
if err != nil {
// We don't fail. We log a warning and continue.
logger.Warnf("ERROR CONVERTING INTO TEMPLATE: %s", err)
}
return nil
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
c4d61108-c74d-4bc3-b9a8-4ecd908dfe7c
|
getvSphereCredentials
|
['"fmt"', '"path"', '"strings"', '"github.com/tidwall/gjson"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_scale.go
|
func getvSphereCredentials(oc *exutil.CLI) (server, dataCenter, dataStore, resourcePool, user, password string, err error) {
var (
configCM = NewConfigMap(oc.AsAdmin(), "openshift-config", "cloud-provider-config")
credsSecret = NewSecret(oc.AsAdmin(), "kube-system", "vsphere-creds")
)
config, err := configCM.GetDataValue("config")
if err != nil {
return
}
cfg, err := ini.Load(strings.NewReader(config))
if err == nil {
logger.Infof("%s config info is in ini fomart. Extracting data", configCM)
server = cfg.Section("Workspace").Key("server").String()
dataCenter = cfg.Section("Workspace").Key("datacenter").String()
dataStore = cfg.Section("Workspace").Key("default-datastore").String()
resourcePool = cfg.Section("Workspace").Key("resourcepool-path").String()
} else {
logger.Infof("%s config info is NOT in ini fomart. Trying to extract the information from the infrastructure resource", configCM)
infra := NewResource(oc.AsAdmin(), "infrastructure", "cluster")
var failureDomain string
failureDomain, err = infra.Get(`{.spec.platformSpec.vsphere.failureDomains[0]}`)
if err != nil {
logger.Errorf("Cannot get the failureDomain from the infrastructure resource: %s", err)
return
}
if failureDomain == "" {
logger.Errorf("Failure domain is empty in the infrastructure resource: %s\n%s", err, infra.PrettyString())
err = fmt.Errorf("Empty failure domain in the infrastructure resource")
return
}
gserver := gjson.Get(failureDomain, "server")
if gserver.Exists() {
server = gserver.String()
} else {
err = fmt.Errorf("Cannot get the server value from failureDomain\n%s", infra.PrettyString())
return
}
gdataCenter := gjson.Get(failureDomain, "topology.datacenter")
if gdataCenter.Exists() {
dataCenter = gdataCenter.String()
} else {
err = fmt.Errorf("Cannot get the data center value from failureDomain\n%s", infra.PrettyString())
return
}
gdataStore := gjson.Get(failureDomain, "topology.datastore")
if gdataStore.Exists() {
dataStore = gdataStore.String()
} else {
err = fmt.Errorf("Cannot get the data store value from failureDomain\n%s", infra.PrettyString())
return
}
gresourcePool := gjson.Get(failureDomain, "topology.resourcePool")
if gresourcePool.Exists() {
resourcePool = gresourcePool.String()
} else {
err = fmt.Errorf("Cannot get the resourcepool value from failureDomain\n%s", infra.PrettyString())
return
}
}
decodedData, err := credsSecret.GetDecodedDataMap()
if err != nil {
return
}
for k, v := range decodedData {
item := v
if strings.Contains(k, "username") {
user = item
}
if strings.Contains(k, "password") {
password = item
}
}
if user == "" {
logger.Errorf("Empty vsphere user")
err = fmt.Errorf("The vsphere user is empty")
return
}
if password == "" {
logger.Errorf("Empty vsphere password")
err = fmt.Errorf("The vsphere password is empty")
return
}
return
}
|
mco
| ||||
test case
|
openshift/openshift-tests-private
|
8e21b1e9-f92b-432d-9f1a-155695e79a62
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-High-63894-[P1] Scaleup using 4.1 cloud image[Disruptive]
|
['"time"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_scale.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-High-63894-[P1] Scaleup using 4.1 cloud image[Disruptive]", func() {
var (
imageVersion = "4.1" // OCP4.1 ami for AWS and use-east2 zone: https://github.com/openshift/installer/blob/release-4.1/data/data/rhcos.json
numNewNodes = 1 // the number of nodes scaled up in the new Machineset
)
skipTestIfSupportedPlatformNotMatched(oc, AWSPlatform) // Scale up using 4.1 is only supported in AWS. GCP is only supported in versions 4.6+, and Vsphere in 4.2+
skipTestIfFIPSIsEnabled(oc.AsAdmin()) // fips was supported for the first time in 4.3, hence it is not supported to scale 4.1 and 4.2 base images in clusters with fips=true
architecture.SkipNonAmd64SingleArch(oc) // arm64 is not supported until 4.12
// Apply workaround
// Because of https://issues.redhat.com/browse/OCPBUGS-27273 this test case fails when the cluster has imagecontentsourcepolicies
// In prow jobs clusters have 2 imagecontentsourcepolicies (brew-registry and image-policy), so we try to remove them to execute this test
// It only happens using 4.1 base images. The issue was fixed in 4.2
// For debugging purposes
oc.AsAdmin().Run("get").Args("ImageContentSourcePolicy").Execute()
oc.AsAdmin().Run("get").Args("ImageTagMirrorSet").Execute()
oc.AsAdmin().Run("get").Args("ImageDigestMirrorSet").Execute()
cleanedICSPs := []*Resource{NewResource(oc.AsAdmin(), "ImageContentSourcePolicy", "brew-registry"), NewResource(oc.AsAdmin(), "ImageContentSourcePolicy", "image-policy")}
logger.Warnf("APPLYING WORKAROUND FOR https://issues.redhat.com/browse/OCPBUGS-27273. Removing expected imageocontentsourcepolicies")
removedICSP := false
defer func() {
if removedICSP {
wMcp.waitForComplete()
mMcp.WaitImmediateForUpdatedStatus()
}
}()
for _, item := range cleanedICSPs {
icsp := item
if icsp.Exists() {
logger.Infof("Cleaning the spec of %s", icsp)
defer icsp.SetSpec(icsp.GetSpecOrFail())
o.Expect(icsp.SetSpec("{}")).To(o.Succeed(),
"Error cleaning %s spec", icsp)
removedICSP = true
}
}
if removedICSP {
wMcp.waitForComplete()
o.Expect(mMcp.WaitImmediateForUpdatedStatus()).To(o.Succeed())
} else {
logger.Infof("No ICSP was removed!!")
}
SimpleScaleUPTest(oc, wMcp, imageVersion, getUserDataIgnitionVersionFromOCPVersion(imageVersion), numNewNodes)
})
| |||||
test case
|
openshift/openshift-tests-private
|
4655e468-5a00-48e3-8b8e-4bdd316c1a3b
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Critical-77051-[P2] Scaleup using 4.3 cloud image[Disruptive]
|
['"github.com/openshift/openshift-tests-private/test/extended/util/architecture"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_scale.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Critical-77051-[P2] Scaleup using 4.3 cloud image[Disruptive]", func() {
var (
imageVersion = "4.3"
numNewNodes = 1 // the number of nodes scaled up in the new Machineset
)
skipTestIfSupportedPlatformNotMatched(oc, AWSPlatform, VspherePlatform) // Scale up using 4.3 is only supported in AWS, and Vsphere. GCP is only supported by our automation in versions 4.6+
architecture.SkipNonAmd64SingleArch(oc) // arm64 is not supported by OCP until 4.12
SimpleScaleUPTest(oc, wMcp, imageVersion, getUserDataIgnitionVersionFromOCPVersion(imageVersion), numNewNodes)
})
| |||||
test case
|
openshift/openshift-tests-private
|
2a5a18cb-4521-4952-ae9e-351dfcbc9c25
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Critical-76471-[P1] Scaleup using 4.12 cloud image[Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_scale.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Critical-76471-[P1] Scaleup using 4.12 cloud image[Disruptive]", func() {
var (
imageVersion = "4.12"
numNewNodes = 1 // the number of nodes scaled up in the new Machineset
)
skipTestIfSupportedPlatformNotMatched(oc, AWSPlatform, GCPPlatform, VspherePlatform) // Scale up using 4.12 is only supported in AWS, GCP and Vsphere
SimpleScaleUPTest(oc, wMcp, imageVersion, getUserDataIgnitionVersionFromOCPVersion(imageVersion), numNewNodes)
})
| ||||||
test case
|
openshift/openshift-tests-private
|
3855c191-3b22-4134-aa5e-2d9baaf23bb3
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-High-52822-[P1] Create new config resources with 2.2.0 ignition boot image nodes [Disruptive]
|
['"fmt"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"', 'o "github.com/onsi/gomega"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_scale.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-High-52822-[P1] Create new config resources with 2.2.0 ignition boot image nodes [Disruptive]", func() {
var (
newMsName = "copied-machineset-modified-tc-52822"
kcName = "change-maxpods-kubelet-config"
kcTemplate = generateTemplateAbsolutePath(kcName + ".yaml")
crName = "change-ctr-cr-config"
crTemplate = generateTemplateAbsolutePath(crName + ".yaml")
mcName = "generic-config-file-test-52822"
mcpWorker = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
// Set the 4.5 boot image ami for east-2 zone.
// the right ami should be selected from here https://github.com/openshift/installer/blob/release-4.5/data/data/rhcos.json
imageVersion = "4.5"
numNewNodes = 1 // the number of nodes scaled up in the new Machineset
)
skipTestIfSupportedPlatformNotMatched(oc, AWSPlatform, VspherePlatform) // Scale up using 4.5 is only supported for AWS and Vsphere. GCP is only supported in versions 4.6+
architecture.SkipNonAmd64SingleArch(oc) // arm64 is not supported until 4.11
initialNumWorkers := len(wMcp.GetNodesOrFail())
defer func() {
logger.Infof("Start TC defer block")
newMs := NewMachineSet(oc.AsAdmin(), MachineAPINamespace, newMsName)
errors := o.InterceptGomegaFailures(func() { // We don't want gomega to fail and stop the deferred cleanup process
removeClonedMachineSet(newMs, wMcp, initialNumWorkers)
cr := NewContainerRuntimeConfig(oc.AsAdmin(), crName, crTemplate)
if cr.Exists() {
logger.Infof("Removing ContainerRuntimeConfig %s", cr.GetName())
o.Expect(cr.Delete()).To(o.Succeed(), "Error removing %s", cr)
}
kc := NewKubeletConfig(oc.AsAdmin(), kcName, kcTemplate)
if kc.Exists() {
logger.Infof("Removing KubeletConfig %s", kc.GetName())
o.Expect(kc.Delete()).To(o.Succeed(), "Error removing %s", kc)
}
// MachineConfig struct has not been refactored to compose the "Resource" struct
// so there is no "Exists" method available. Use it after refactoring MachineConfig
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker)
logger.Infof("Removing machineconfig %s", mcName)
mc.delete()
})
if len(errors) != 0 {
logger.Infof("There were errors restoring the original MachineSet resources in the cluster")
for _, e := range errors {
logger.Errorf(e)
}
}
logger.Infof("Waiting for worker pool to be updated")
mcpWorker.waitForComplete()
// We don't want the test to pass if there were errors while restoring the initial state
o.Expect(len(errors)).To(o.BeZero(),
"There were %d errors while recovering the cluster's initial state", len(errors))
logger.Infof("End TC defer block")
}()
// Duplicate an existing MachineSet
allMs, err := NewMachineSetList(oc, MachineAPINamespace).GetAll()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting a list of MachineSet resources")
ms := allMs[0]
newMs := cloneMachineSet(oc.AsAdmin(), ms, newMsName, imageVersion, getUserDataIgnitionVersionFromOCPVersion(imageVersion))
// KubeletConfig
exutil.By("Create KubeletConfig")
kc := NewKubeletConfig(oc.AsAdmin(), kcName, kcTemplate)
kc.create()
kc.waitUntilSuccess("10s")
logger.Infof("OK!\n")
// ContainterRuntimeConfig
exutil.By("Create ContainterRuntimeConfig")
cr := NewContainerRuntimeConfig(oc.AsAdmin(), crName, crTemplate)
cr.create()
cr.waitUntilSuccess("10s")
logger.Infof("OK!\n")
// Generic machineconfig
exutil.By("Create generic config file")
genericConfigFilePath := "/etc/test-52822"
genericConfig := "config content for test case 52822"
fileConfig := getURLEncodedFileConfig(genericConfigFilePath, genericConfig, "420")
template := NewMCOTemplate(oc, "generic-machine-config-template.yml")
errCreate := template.Create("-p", "NAME="+mcName, "-p", "POOL=worker", "-p", fmt.Sprintf("FILES=[%s]", fileConfig))
o.Expect(errCreate).NotTo(o.HaveOccurred(), "Error creating MachineConfig %s", mcName)
logger.Infof("OK!\n")
// Wait for all pools to apply the configs
exutil.By("Wait for worker MCP to be updated")
mcpWorker.waitForComplete()
logger.Infof("OK!\n")
// Scale up the MachineSet
exutil.By("Scale MachineSet up")
logger.Infof("Scaling up machineset %s", newMs.GetName())
scaleErr := newMs.ScaleTo(numNewNodes)
o.Expect(scaleErr).NotTo(o.HaveOccurred(), "Error scaling up MachineSet %s", newMs.GetName())
logger.Infof("Waiting %s machineset for being ready", newMsName)
o.Eventually(newMs.GetIsReady, "20m", "2m").Should(o.BeTrue(), "MachineSet %s is not ready", newMs.GetName())
logger.Infof("OK!\n")
exutil.By("Check that worker pool is increased and updated")
o.Eventually(wMcp.GetNodesOrFail, "5m", "30s").Should(o.HaveLen(initialNumWorkers+numNewNodes),
"The worker pool has not added the new nodes created by the new Machineset.\n%s", wMcp.PrettyString())
// Verify that the scaled nodes has been configured properly
exutil.By("Check config in the new node")
newNodes, nErr := newMs.GetNodes()
o.Expect(nErr).NotTo(o.HaveOccurred(), "Error getting the nodes created by MachineSet %s", newMs.GetName())
o.Expect(newNodes).To(o.HaveLen(numNewNodes), "Only %d nodes should have been created by MachineSet %s", numNewNodes, newMs.GetName())
newNode := newNodes[0]
logger.Infof("New node: %s", newNode.GetName())
logger.Infof("OK!\n")
exutil.By("Check kubelet config")
kcFile := NewRemoteFile(*newNode, "/etc/kubernetes/kubelet.conf")
kcrErr := kcFile.Fetch()
o.Expect(kcrErr).NotTo(o.HaveOccurred(), "Error reading kubelet config in node %s", newNode.GetName())
o.Expect(kcFile.GetTextContent()).Should(o.Or(o.ContainSubstring(`"maxPods": 500`), o.ContainSubstring(`maxPods: 500`)),
"File /etc/kubernetes/kubelet.conf has not the expected content")
logger.Infof("OK!\n")
exutil.By("Check container runtime config")
crFile := NewRemoteFile(*newNode, "/etc/containers/storage.conf")
crrErr := crFile.Fetch()
o.Expect(crrErr).NotTo(o.HaveOccurred(), "Error reading container runtime config in node %s", newNode.GetName())
o.Expect(crFile.GetTextContent()).Should(o.ContainSubstring("size = \"8G\""),
"File /etc/containers/storage.conf has not the expected content")
logger.Infof("OK!\n")
exutil.By("Check generic machine config")
cFile := NewRemoteFile(*newNode, genericConfigFilePath)
crErr := cFile.Fetch()
o.Expect(crErr).NotTo(o.HaveOccurred(), "Error reading generic config file in node %s", newNode.GetName())
o.Expect(cFile.GetTextContent()).Should(o.Equal(genericConfig),
"File %s has not the expected content", genericConfigFilePath)
logger.Infof("OK!\n")
exutil.By("Scale down and remove the cloned Machineset")
removeClonedMachineSet(newMs, wMcp, initialNumWorkers)
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
6059d96c-816a-4e10-a992-168986ae1c5c
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-High-65923-SSH key in scaled clusters [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_scale.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-High-65923-SSH key in scaled clusters [Disruptive]", func() {
// It is a safe assumption that all the tested clusters will have an sshkey deployed in them.
// If at any moment this assumption is not safe anymore, we need to check whether the sshkey exists
// and create a MC to deploy an sshkey in case none is deployed
var (
initialNumWorkers = len(wMcp.GetNodesOrFail())
numNewNodes = 1
)
defer wMcp.waitForComplete()
exutil.By("Scale up a machineset")
allMs, err := NewMachineSetList(oc, MachineAPINamespace).GetAll()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting a list of MachineSet resources")
ms := allMs[0]
initialMsNodes, err := ms.GetNodes()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting a list of nodes that belong to machineset %s", ms.GetName())
initialNumMsNodes := len(initialMsNodes)
logger.Infof("Scaling up machineset %s by 1", ms.GetName())
defer func() { _ = ms.ScaleTo(initialNumMsNodes) }()
o.Expect(ms.ScaleTo(initialNumMsNodes+numNewNodes)).NotTo(
o.HaveOccurred(),
"Error scaling up MachineSet %s", ms.GetName())
logger.Infof("OK!\n")
logger.Infof("Waiting %s machineset for being ready", ms)
o.Eventually(ms.GetIsReady, "20m", "2m").Should(o.BeTrue(), "MachineSet %s is not ready", ms.GetName())
logger.Infof("OK!\n")
exutil.By("Check that worker pool is increased and updated")
o.Eventually(wMcp.GetNodesOrFail, "5m", "30s").Should(o.HaveLen(initialNumWorkers+numNewNodes),
"The worker pool has not added the new nodes created by the new Machineset.\n%s", wMcp.PrettyString())
wMcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Check that the sshkey exists in all nodes")
currentWorkers := wMcp.GetNodesOrFail()
for _, node := range currentWorkers {
logger.Infof("Checking sshkey in node %s", node.GetName())
remoteSSHKey := NewRemoteFile(node, "/home/core/.ssh/authorized_keys.d/ignition")
o.Expect(remoteSSHKey.Fetch()).To(o.Succeed(),
"Error getting the content of the sshkey file in node %s", node.GetName())
o.Expect(remoteSSHKey.GetTextContent()).NotTo(o.BeEmpty(),
"The sshkey file has no content in node %s", node.GetName())
logger.Infof("Sshkey is OK in node %s", node.GetName())
}
logger.Infof("OK!\n")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
0a6741b1-79aa-42e6-aadc-e0778940957e
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-64623-[P1] Machine Config Server CA rotation. IPI. [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_scale.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-64623-[P1] Machine Config Server CA rotation. IPI. [Disruptive]", func() {
var (
initialNumWorkers = len(wMcp.GetNodesOrFail())
numNewNodes = 1
)
// skip the test if fips is not enabled
skipTestIfFIPSIsNotEnabled(oc)
defer wMcp.waitForComplete()
exutil.By("Rotate MCS certificate")
initialMCSPods, err := GetMCSPodNames(oc.AsAdmin())
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting MCS pod names")
logger.Infof("Current MCS pod names: %s", initialMCSPods)
o.Expect(
RotateMCSCertificates(oc.AsAdmin()),
// oc.AsAdmin().WithoutNamespace().Run("adm").Args("ocp-certificates", "regenerate-machine-config-server-serving-cert").Execute(),
).To(o.Succeed(),
"Error rotating MCS certificates")
logger.Infof("OK!\n")
exutil.By("Check that MCS pods were restarted")
o.Eventually(func(gm o.Gomega) {
// for debugging purposes
logger.Infof("Waiting for MCS pods to be restarted")
_ = oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", MachineConfigNamespace).Execute()
currentMCSPods, err := GetMCSPodNames(oc.AsAdmin())
gm.Expect(err).NotTo(o.HaveOccurred(),
"Error getting MCS pod names")
for _, initialMCSPod := range initialMCSPods {
gm.Expect(currentMCSPods).NotTo(o.ContainElement(initialMCSPod),
"MCS pod %s was not restarted after certs rotation", initialMCSPod)
}
}, "5m", "20s",
).Should(o.Succeed(),
"The MCS pods were not restarted after the MCS certificates were rotated")
logger.Infof("OK!\n")
exutil.By("Check that new machine-config-server-tls and machine-config-server-ca secrets are created")
tlsSecret := NewSecret(oc.AsAdmin(), MachineConfigNamespace, "machine-config-server-tls")
caSecret := NewSecret(oc.AsAdmin(), MachineConfigNamespace, "machine-config-server-ca")
o.Eventually(tlsSecret, "30s", "5s").Should(Exist(),
"%s secret does not exist in the MCO namespace after MCS cert rotations", tlsSecret.GetName())
o.Eventually(caSecret, "30s", "5s").Should(Exist(),
"%s secret does not exist in the MCO namespace after MCS cert rotations", tlsSecret.GetName())
logger.Infof("OK!\n")
exutil.By("Scale up a machineset")
allMs, err := NewMachineSetList(oc, MachineAPINamespace).GetAll()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting a list of MachineSet resources")
ms := allMs[0]
initialMsNodes, err := ms.GetNodes()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting a list of nodes that belong to machineset %s", ms.GetName())
initialNumMsNodes := len(initialMsNodes)
logger.Infof("Scaling up machineset %s by 1", ms.GetName())
defer func() { _ = ms.ScaleTo(initialNumMsNodes) }()
o.Expect(ms.ScaleTo(initialNumMsNodes+numNewNodes)).NotTo(
o.HaveOccurred(),
"Error scaling up MachineSet %s", ms.GetName())
logger.Infof("OK!\n")
logger.Infof("Waiting %s machineset for being ready", ms)
o.Eventually(ms.GetIsReady, "20m", "2m").Should(o.BeTrue(), "MachineSet %s is not ready", ms.GetName())
logger.Infof("OK!\n")
exutil.By("Check that worker pool is increased and updated")
o.Eventually(wMcp.GetNodesOrFail, "5m", "30s").Should(o.HaveLen(initialNumWorkers+numNewNodes),
"The worker pool has not added the new nodes created by the new Machineset.\n%s", wMcp.PrettyString())
wMcp.waitForComplete()
logger.Infof("All nodes are up and ready!")
logger.Infof("OK!\n")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
bbdce9a7-6ce5-4b37-8a53-a175f9c14cc1
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Medium-73636-[P2][OnCLayer] Pinned images in scaled nodes [Disruptive]
|
['"time"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_scale.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Medium-73636-[P2][OnCLayer] Pinned images in scaled nodes [Disruptive]", func() {
// The pinnedimageset feature is currently only supported in techpreview
skipIfNoTechPreview(oc.AsAdmin())
var (
waitForPinned = time.Minute * 5
initialNumWorkers = len(wMcp.GetNodesOrFail())
numNewNodes = 3
pinnedImageSetName = "tc-73636-pinned-images-scale"
pinnedImageName = BusyBoxImage
)
exutil.By("Pin images")
pis, err := CreateGenericPinnedImageSet(oc.AsAdmin(), pinnedImageSetName, wMcp.GetName(), []string{pinnedImageName})
defer pis.DeleteAndWait(waitForPinned)
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating pinnedimageset %s", pis)
logger.Infof("OK!\n")
exutil.By("Check that the pool is reporting the right pinnedimageset status")
o.Expect(wMcp.waitForPinComplete(waitForPinned)).To(o.Succeed(), "Pinned image operation is not completed in %s", wMcp)
logger.Infof("OK!\n")
exutil.By("Check that the image was pinned in all nodes")
for _, node := range wMcp.GetNodesOrFail() {
rmi := NewRemoteImage(node, pinnedImageName)
o.Expect(rmi.IsPinned()).To(o.BeTrue(), "%s is not pinned, but it should", rmi)
}
logger.Infof("OK!\n")
exutil.By("Scale up a machineset")
allMs, err := NewMachineSetList(oc, MachineAPINamespace).GetAll()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting a list of MachineSet resources")
ms := allMs[0]
initialNumMsNodes := len(ms.GetNodesOrFail())
logger.Infof("Scaling up machineset %s by %d", ms.GetName(), numNewNodes)
defer func() {
_ = ms.ScaleTo(initialNumMsNodes)
wMcp.waitForComplete()
}()
o.Expect(ms.ScaleTo(initialNumMsNodes+numNewNodes)).NotTo(
o.HaveOccurred(),
"Error scaling up MachineSet %s", ms.GetName())
logger.Infof("OK!\n")
exutil.By("Check that worker pool is increased and updated")
o.Eventually(wMcp.GetNodesOrFail, "5m", "30s").Should(o.HaveLen(initialNumWorkers+numNewNodes),
"The worker pool has not added the new nodes created by the new Machineset.\n%s", wMcp.PrettyString())
wMcp.waitForComplete()
logger.Infof("All nodes are up and ready!")
logger.Infof("OK!\n")
exutil.By("Check that the pool is reporting the right pinnedimageset status")
o.Expect(wMcp.waitForPinComplete(waitForPinned)).To(o.Succeed(), "Pinned image operation is not completed in %s", wMcp)
logger.Infof("OK!\n")
exutil.By("Check that the image was pinned in all nodes")
for _, node := range wMcp.GetNodesOrFail() {
rmi := NewRemoteImage(node, pinnedImageName)
o.Expect(rmi.IsPinned()).To(o.BeTrue(), "%s is not pinned, but it should", rmi)
}
logger.Infof("OK!\n")
})
| |||||
test
|
openshift/openshift-tests-private
|
d1fcf4ca-9937-4165-a0bd-1a2d2d537a44
|
mco_security
|
import (
"crypto/tls"
"crypto/x509"
"encoding/pem"
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
)
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_security.go
|
package mco
import (
"crypto/tls"
"crypto/x509"
"encoding/pem"
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
)
var _ = g.Describe("[sig-mco] MCO security", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("mco-security", exutil.KubeConfigPath())
wMcp *MachineConfigPool
mMcp *MachineConfigPool
// Compact compatible MCP. If the node is compact/SNO this variable will be the master pool, else it will be the worker pool
mcp *MachineConfigPool
cc *ControllerConfig
)
g.JustBeforeEach(func() {
wMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
mMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)
mcp = GetCompactCompatiblePool(oc.AsAdmin())
cc = NewControllerConfig(oc.AsAdmin(), "machine-config-controller")
logger.Infof("%s %s %s", wMcp, mMcp, mcp)
preChecks(oc)
})
g.It("Author:sregidor-NonHyperShiftHOST-Medium-66048-[P1][OnCLayer] Check image registry user bundle certificate [Disruptive]", func() {
if !IsCapabilityEnabled(oc.AsAdmin(), "ImageRegistry") {
g.Skip("ImageRegistry is not installed, skip this test")
}
var (
mergedTrustedImageRegistryCACM = NewConfigMap(oc.AsAdmin(), "openshift-config-managed", "merged-trusted-image-registry-ca")
imageConfig = NewResource(oc.AsAdmin(), "image.config.openshift.io", "cluster")
certFileName = "caKey.pem"
cmName = "cm-test-ca"
)
exutil.By("Get current image.config spec")
initImageConfigSpec := imageConfig.GetOrFail(`{.spec}`)
defer func() {
logger.Infof("Restore original image.config spec: %s", initImageConfigSpec)
_ = imageConfig.Patch("json", `[{ "op": "add", "path": "/spec", "value": `+initImageConfigSpec+`}]`)
}()
initialCMCreationTime := mergedTrustedImageRegistryCACM.GetOrFail(`{.metadata.creationTimestamp}`)
logger.Infof("OK!\n")
exutil.By("Add new additionalTrustedCA to the image.config resource")
logger.Infof("Creating new config map with a new CA")
additionalTrustedCM, err := CreateConfigMapWithRandomCert(oc.AsAdmin(), "openshift-config", cmName, certFileName)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error creating a configmap with a CA")
defer additionalTrustedCM.Delete()
newCertificate := additionalTrustedCM.GetDataValueOrFail(certFileName)
logger.Infof("Configure the image.config resource to use the new configmap")
o.Expect(imageConfig.Patch("merge", fmt.Sprintf(`{"spec": {"additionalTrustedCA": {"name": "%s"}}}`, cmName))).To(
o.Succeed(),
"Error setting the new image.config spec")
logger.Infof("OK!\n")
exutil.By("Check that the ControllerConfig has been properly synced")
o.Eventually(cc.GetImageRegistryBundleUserDataByFileName,
"3m", "20s").WithArguments(certFileName).Should(
exutil.Secure(o.Equal(newCertificate)),
"The new certificate was not properly added to the controller config imageRegistryBundleUserData")
usrDataInfo, err := GetCertificatesInfoFromPemBundle(certFileName, []byte(newCertificate))
o.Expect(err).NotTo(o.HaveOccurred(),
"Error extracting certificate info from the new additional trusted CA")
o.Expect(cc.GetCertificatesInfoByBundleFileName(certFileName)).To(
o.Equal(usrDataInfo),
"The information reported in the ControllerConfig for bundle file %s is wrong", certFileName)
logger.Infof("OK!\n")
exutil.By("Check that the merged-trusted-image-registry-ca configmap has been properly synced")
o.Expect(mergedTrustedImageRegistryCACM.GetDataValueOrFail(certFileName)).To(
exutil.Secure(o.Equal(newCertificate)),
"The configmap -n openshift-config-managed merged-trusted-image-registry-ca was not properly synced")
o.Expect(mergedTrustedImageRegistryCACM.Get(`{.metadata.creationTimestamp}`)).To(
o.Equal(initialCMCreationTime),
"The %s resource was not patched! It was recreated! The configmap should be patched since https://issues.redhat.com/browse/OCPBUGS-18800", mergedTrustedImageRegistryCACM)
logger.Infof("OK!\n")
// We verify that all nodes in the pools have the new certificate (be aware that windows nodes do not belong to any pool, we are skipping them)
for _, node := range append(wMcp.GetNodesOrFail(), mMcp.GetNodesOrFail()...) {
exutil.By(fmt.Sprintf("Check that the certificate was correctly deployed in node %s", node.GetName()))
EventuallyImageRegistryCertificateExistsInNode(certFileName, newCertificate, node, "5m", "30s")
logger.Infof("OK!\n")
}
exutil.By("Configure an empty image.config spec")
o.Expect(imageConfig.Patch("json", `[{ "op": "add", "path": "/spec", "value": {}}]`)).To(
o.Succeed(),
"Error configuring an empty image.config spec")
logger.Infof("OK!\n")
exutil.By("Check that the ControllerConfig was properly synced")
o.Eventually(cc.GetImageRegistryBundleUserData, "45s", "20s").ShouldNot(
exutil.Secure(o.HaveKey(certFileName)),
"The new certificate was not properly removed from the ControllerConfig imageRegistryBundleUserData")
o.Expect(cc.GetCertificatesInfoByBundleFileName(certFileName)).To(
exutil.Secure(o.BeEmpty()),
"The information reported in the ControllerConfig for bundle file %s was not removed", certFileName)
logger.Infof("OK!\n")
exutil.By("Check that the merged-trusted-image-registry-ca configmap has been properly synced")
o.Expect(mergedTrustedImageRegistryCACM.GetDataMap()).NotTo(
exutil.Secure(o.HaveKey(newCertificate)),
"The certificate was not removed from the configmap -n openshift-config-managed merged-trusted-image-registry-ca")
o.Expect(mergedTrustedImageRegistryCACM.Get(`{.metadata.creationTimestamp}`)).To(
o.Equal(initialCMCreationTime),
"The %s resource was not patched! It was recreated! The configmap should be patched since https://issues.redhat.com/browse/OCPBUGS-18800", mergedTrustedImageRegistryCACM)
logger.Infof("OK!\n")
// We verify that the certificate was removed from all nodes in the pools (be aware that windows nodes do not belong to any pool, we are skipping them)
for _, node := range append(wMcp.GetNodesOrFail(), mMcp.GetNodesOrFail()...) {
exutil.By(fmt.Sprintf("Check that the certificate was correctly removed from node %s", node.GetName()))
certPath := filepath.Join(ImageRegistryCertificatesDir, certFileName, ImageRegistryCertificatesFileName)
rfCert := NewRemoteFile(node, certPath)
logger.Infof("Checking certificate file %s", certPath)
o.Eventually(rfCert.Exists, "5m", "20s").Should(
o.BeFalse(),
"The certificate %s was not removed from the node %s. But it should have been removed after the image.config reconfiguration",
certPath, node.GetName())
logger.Infof("OK!\n")
}
})
g.It("Author:sregidor-NonHyperShiftHOST-High-67660-[P2][OnCLayer] MCS generates ignition configs with certs [Disruptive]", func() {
var (
proxy = NewResource(oc.AsAdmin(), "proxy", "cluster")
certFileKey = "ca-bundle.crt"
cloudCertFileKey = "ca-bundle.pem"
userCABundleConfigMap = NewConfigMap(oc.AsAdmin(), "openshift-config", "user-ca-bundle")
cmName = "test-proxy-config"
cmNamespace = "openshift-config"
proxyConfigMap *ConfigMap
kubeCloudProviderConfigMap = GetCloudProviderConfigMap(oc.AsAdmin())
kubeCloudManagedConfigMap = NewConfigMap(oc.AsAdmin(), "openshift-config-managed", "kube-cloud-config")
kubeCertFile = "/etc/kubernetes/kubelet-ca.crt"
userCABundleCertFile = "/etc/pki/ca-trust/source/anchors/openshift-config-user-ca-bundle.crt"
kubeCloudCertFile = "/etc/kubernetes/static-pod-resources/configmaps/cloud-config/ca-bundle.pem"
ignitionConfig = "3.4.0"
node = mcp.GetSortedNodesOrFail()[0]
)
logger.Infof("Using pool %s for testing", mcp.GetName())
exutil.By("Getting initial status")
rfUserCA := NewRemoteFile(node, userCABundleCertFile)
o.Expect(rfUserCA.Fetch()).To(o.Succeed(), "Error getting the initial user CA bundle content")
initialUserCAContent := rfUserCA.GetTextContent()
rfCloudCA := NewRemoteFile(node, kubeCloudCertFile)
o.Expect(rfCloudCA.Fetch()).To(o.Succeed(), "Error getting the initial cloud CA bundle content")
initialCloudCAContent := rfCloudCA.GetTextContent()
defer func() {
wMcp.waitForComplete()
mMcp.waitForComplete()
exutil.By("Checking that the user CA bundle file content was properly restored when the configuration was removed")
o.Eventually(rfUserCA.Read, "5m", "20s").Should(exutil.Secure(HaveContent(initialUserCAContent)),
"The user CA bundle file content was not restored after the configuration was removed")
logger.Infof("OK!\n")
exutil.By("Checking that the cloud CA bundle file content was properly restored when the configuration was removed")
o.Eventually(rfCloudCA.Read, "5m", "20s").Should(exutil.Secure(HaveContent(initialCloudCAContent)),
"The cloud CA bundle file content was not restored after the configuration was removed")
logger.Infof("OK!\n")
}()
logger.Infof("OK!\n")
// Create a new config map and configure the proxy additional trusted CA if necessary
proxyConfigMapName := proxy.GetOrFail(`{.spec.trustedCA.name}`)
if proxyConfigMapName == "" {
var err error
exutil.By("Configure the proxy with an additional trusted CA")
logger.Infof("Create a configmap with the CA")
proxyConfigMap, err = CreateConfigMapWithRandomCert(oc.AsAdmin(), cmNamespace, cmName, certFileKey)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error creating a configmap with a CA")
defer proxyConfigMap.Delete()
logger.Infof("Patch the proxy resource to use the new configmap")
initProxySpec := proxy.GetOrFail(`{.spec}`)
defer func() {
logger.Infof("Restore original proxy spec: %s", initProxySpec)
_ = proxy.Patch("json", `[{ "op": "add", "path": "/spec", "value": `+initProxySpec+`}]`)
}()
proxy.Patch("merge", fmt.Sprintf(`{"spec": {"trustedCA": {"name": "%s"}}}`, cmName))
// TODO: remove this when the userCA bundle is handled using a controller and not a MC. It will be implemented in the near future
mcp.waitForComplete()
logger.Infof("OK!\n")
} else {
logger.Infof("The proxy is already configured to use the CA inside this configmap: %s", proxyConfigMapName)
proxyConfigMap = NewConfigMap(oc.AsAdmin(), "openshift-config", proxyConfigMapName)
}
exutil.By(fmt.Sprintf(`Check that the "%s" is in the ignition config`, kubeCertFile))
jsonPath := fmt.Sprintf(`storage.files.#(path=="%s")`, kubeCertFile)
o.Eventually(mcp.GetMCSIgnitionConfig,
"1m", "20s").WithArguments(true, ignitionConfig).ShouldNot(
HavePathWithValue(jsonPath, o.BeEmpty()),
"The file %s is not served in the ignition config", kubeCertFile)
logger.Infof("OK!\n")
exutil.By(fmt.Sprintf(`Check that the "%s" is in the ignition config`, userCABundleCertFile))
logger.Infof("Check that the file is served in the ignition config")
jsonPath = fmt.Sprintf(`storage.files.#(path=="%s")`, userCABundleCertFile)
o.Eventually(mcp.GetMCSIgnitionConfig,
"1m", "20s").WithArguments(true, ignitionConfig).ShouldNot(
HavePathWithValue(jsonPath, o.BeEmpty()),
"The file %s is not served in the ignition config", userCABundleCertFile)
logger.Infof("Check that the file has the right content in the nodes")
certContent := ""
if userCABundleConfigMap.Exists() {
userCABundleCert, exists, err := userCABundleConfigMap.HasKey(certFileKey)
o.Expect(err).NotTo(o.HaveOccurred(), "Error checking if %s contains key '%s'", userCABundleConfigMap, certFileKey)
if exists {
certContent = userCABundleCert
}
} else {
logger.Infof("%s does not exist. We don't take it into account", userCABundleConfigMap)
}
// OCPQE-17800 only merge the cert contents when trusted CA in proxy/cluster is not cm/user-ca-bundle
if proxyConfigMap.GetName() != userCABundleConfigMap.GetName() {
certContent += proxyConfigMap.GetDataValueOrFail(certFileKey)
}
EventuallyFileExistsInNode(userCABundleCertFile, certContent, node, "3m", "20s")
logger.Infof("OK!\n")
exutil.By(fmt.Sprintf(`Check that the "%s" is CA trusted. Command update-ca-trust was executed when the file was added`, userCABundleCertFile))
o.Eventually(BundleFileIsCATrusted, "5m", "20s").WithArguments(userCABundleCertFile, node).Should(
o.BeTrue(), "The %s file was not ca-trusted. It seems that the update-ca-trust command was not executed after updating the file", userCABundleCertFile)
logger.Infof("OK!\n")
exutil.By(fmt.Sprintf(`Check that the "%s" is in the ignition config`, kubeCloudCertFile))
kubeCloudCertContent, err := kubeCloudManagedConfigMap.GetDataValue("ca-bundle.pem")
if err != nil {
logger.Infof("No KubeCloud cert configured, configuring a new value")
if kubeCloudProviderConfigMap != nil && kubeCloudProviderConfigMap.Exists() {
_, caPath, err := createCA(createTmpDir(), cloudCertFileKey)
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating a new random certificate")
defer kubeCloudProviderConfigMap.RemoveDataKey(cloudCertFileKey)
kubeCloudProviderConfigMap.SetData("--from-file=" + cloudCertFileKey + "=" + caPath)
o.Eventually(kubeCloudManagedConfigMap.GetDataValueOrFail, "5m", "20s").WithArguments(cloudCertFileKey).ShouldNot(o.BeEmpty(),
"A new CA was added to %s but the managed resource %s was not populated", kubeCloudProviderConfigMap, kubeCloudManagedConfigMap)
kubeCloudCertContent = kubeCloudManagedConfigMap.GetDataValueOrFail(cloudCertFileKey)
} else {
logger.Infof("It is not possible to configure a new CloudCA. CloudProviderConfig configmap is not defined in the infrastructure resource or it does not exist")
kubeCloudCertContent = ""
}
}
if kubeCloudCertContent != "" {
logger.Infof("Check that the file is served in the ignition config")
jsonPath = fmt.Sprintf(`storage.files.#(path=="%s")`, kubeCloudCertFile)
o.Eventually(mcp.GetMCSIgnitionConfig,
"6m", "20s").WithArguments(true, ignitionConfig).ShouldNot(
HavePathWithValue(jsonPath, o.BeEmpty()),
"The file %s is not served in the ignition config", kubeCloudCertFile)
logger.Infof("Check that the file has the right content in the nodes")
EventuallyFileExistsInNode(kubeCloudCertFile, kubeCloudCertContent, node, "3m", "20s")
} else {
logger.Infof("No KubeCloud cert was configured and it was not possible to define a new one, we skip the cloudCA validation")
}
logger.Infof("OK!\n")
})
g.It("Author:rioliu-NonHyperShiftHOST-NonPreRelease-Longduration-High-71991-post action of user-ca-bundle change will skip drain,reboot and restart crio service [Disruptive]", func() {
var (
mcName = "mco-tc-71991"
filePath = "/etc/pki/ca-trust/source/anchors/openshift-config-user-ca-bundle.crt"
mode = 420 // decimal 0644
objsignCABundlePemPath = "/etc/pki/ca-trust/extracted/pem/objsign-ca-bundle.pem"
node = mcp.GetSortedNodesOrFail()[0]
behaviourValidator = UpdateBehaviourValidator{
RebootNodesShouldBeSkipped: true,
DrainNodesShoulBeSkipped: true,
Checkers: []Checker{
NodeEventsChecker{
EventsSequence: []string{"Reboot", "Drain"},
EventsAreNotTriggered: true,
},
},
}
)
behaviourValidator.Initialize(mcp, nil)
exutil.By("Removing all MCD pods to clean the logs")
o.Expect(RemoveAllMCDPods(oc)).To(o.Succeed(), "Error removing all MCD pods in %s namespace", MachineConfigNamespace)
logger.Infof("OK!\n")
exutil.By("Create a new certificate")
_, caPath, err := createCA(createTmpDir(), "newcert.pem")
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating a new random certificate")
bcert, err := os.ReadFile(caPath)
o.Expect(err).NotTo(o.HaveOccurred(), "Error reading the new random certificate")
cert := string(bcert)
logger.Infof("OK!\n")
exutil.By("Create the MachineConfig with the new certificate")
file := ign32File{
Path: filePath,
Contents: ign32Contents{
Source: GetBase64EncodedFileSourceContent(cert),
},
Mode: PtrTo(mode),
}
mc := NewMachineConfig(oc.AsAdmin(), mcName, mcp.GetName())
mc.parameters = []string{fmt.Sprintf("FILES=[%s]", string(MarshalOrFail(file)))}
mc.skipWaitForMcp = true
defer mc.delete()
mc.create()
logger.Infof("OK!\n")
// Check that the MC is applied according to the expected behaviour
behaviourValidator.Validate()
exutil.By("Check that the certificate was created and updated in the cluster by using update-ca-trust command")
certRemote := NewRemoteFile(node, filePath)
objsignCABundleRemote := NewRemoteFile(node, objsignCABundlePemPath)
o.Eventually(certRemote, "5m", "20s").Should(Exist(),
"The file %s does not exist in the node %s after applying the configuration", certRemote.fullPath, node.GetName())
o.Eventually(certRemote, "5m", "20s").Should(exutil.Secure(HaveContent(o.ContainSubstring(cert))),
"%s doesn't have the expected content. It doesn't include the configured certificate", certRemote)
o.Eventually(objsignCABundleRemote, "5m", "20s").Should(Exist(),
"The file %s does not exist in the node %s after applying the configuration", objsignCABundleRemote.fullPath, node.GetName())
o.Expect(certRemote.Fetch()).To(o.Succeed(),
"There was an error trying to read the content of file %s in node %s", certRemote.fullPath, node.GetName())
// diff /etc/pki/ca-trust/extracted/pem/objsign-ca-bundle.pem /etc/pki/ca-trust/source/anchors/openshift-config-user-ca-bundle.crt | less
// The new certificate should be included in the /etc/pki/ca-trust/extracted/pem/objsign-ca-bundle.pem file when we execute the update-ca-trust command
o.Expect(objsignCABundleRemote.Read()).To(exutil.Secure(HaveContent(o.ContainSubstring(certRemote.GetTextContent()))),
"In node %s: the content of the file %s should have been added to the file %s. Command 'update-ca-trust' was not executed by MCD",
node.GetName(), certRemote.fullPath, objsignCABundleRemote.fullPath)
logger.Infof("OK!\n")
exutil.By("Removing all MCD pods to clean the logs before the MC deletion")
o.Expect(RemoveAllMCDPods(oc)).To(o.Succeed(), "Error removing all MCD pods in %s namespace", MachineConfigNamespace)
logger.Infof("OK!\n")
exutil.By("Delete the MachineConfig")
behaviourValidator.Initialize(mcp, nil) // re-initialize the validator to ignore previous events
mc.deleteNoWait()
logger.Infof("OK!\n")
// Check that the MC is removed according to the expected behaviour
behaviourValidator.Validate()
exutil.By("Check that the openshift-config-user-ca-bundle.crt file does not include the certificate anymore and the nodes were updated with update-ca-trust")
// The file is not removed, it is always present but with empty content
o.Eventually(certRemote.Read, "5m", "20s").ShouldNot(exutil.Secure(HaveContent(o.ContainSubstring(cert))),
"The certificate has been removed, but %s in node %s still contains the certificate", certRemote.fullPath, node.GetName())
o.Eventually(objsignCABundleRemote, "5m", "20s").Should(Exist(),
"The file %s does not exist in the node %s but it should exist after removing the configuration", objsignCABundleRemote.fullPath, node.GetName())
o.Expect(objsignCABundleRemote.Read()).NotTo(exutil.Secure(HaveContent(o.ContainSubstring(cert))),
"In node %s: the certificate in %s should have been removed from the file %s. Command 'update-ca-trust' was not executed by MCD after removing the MC",
node.GetName(), certRemote.fullPath, objsignCABundleRemote.fullPath)
logger.Infof("OK!\n")
})
// In the latest branches times are artificially reduced and after about 6 or 7 hours all kube-apiserver certificates are rotated
// If we execute this test case, when this rotation happens the kubeconfig file needs to be updated to use new certificates and all test cases start failing because of this
// If we don't execute this test case, when this rotation happens the kubeconfig needs no update
// We will skip this test case in prow jobs and we will execute it only out of CI
g.It("Author:sregido-DEPRECATED-NonHyperShiftHOST-NonPreRelease-Critical-70857-[P1][OnCLayer] boostrap kubeconfig must be updated when kube-apiserver server CA is rotated [Disruptive]", func() {
var (
mco = NewResource(oc.AsAdmin(), "co", "machine-config")
kubernetesKubeconfigPath = "/etc/kubernetes/kubeconfig"
kubeletKubeconfigPath = "/var/lib/kubelet/kubeconfig"
lbServingSignerSecret = NewSecret(oc.AsAdmin(), "openshift-kube-apiserver-operator", "loadbalancer-serving-signer")
kubeAPIServerCM = NewConfigMap(oc.AsAdmin(), "openshift-config-managed", "kube-apiserver-server-ca")
node = mcp.GetSortedNodesOrFail()[0]
startTime = node.GetDateOrFail()
)
// we are going to fail the test if there is any CO degraded, so we want to know the initial status of the COs
NewResourceList(oc.AsAdmin(), "co").PrintDebugCommand()
exutil.By("Rotate certificate in loadbalancer-serving-signer secret")
newCert := rotateTLSSecretOrFail(lbServingSignerSecret)
logger.Debugf("New TLS cert:\n%s", newCert)
logger.Infof("OK!\n")
exutil.By("Check that the kube-apiserver-serve-ca configmap contains the new TLS secret")
o.Eventually(kubeAPIServerCM.GetDataValue, "5m", "20s").WithArguments("ca-bundle.crt").Should(
exutil.Secure(o.ContainSubstring(newCert)),
"The new TLS certificate was not added to configmap %s", kubeAPIServerCM)
caBundle := strings.TrimSpace(kubeAPIServerCM.GetDataValueOrFail("ca-bundle.crt"))
logger.Debugf("New CA bundle:\n%s", caBundle)
logger.Infof("OK!\n")
exutil.By("Check kubernetes kubeconfig file was correctly updated")
// Eventually the kubeconfig file should be updated with the new certificates stored in kube-apiserver-serve-ca
rfKubernetesKubecon := NewRemoteFile(node, kubernetesKubeconfigPath)
o.Eventually(func() (string, error) {
err := rfKubernetesKubecon.Fetch()
if err != nil {
return "", err
}
cert, err := getCertsFromKubeconfig(rfKubernetesKubecon.GetTextContent())
if err != nil {
return "", err
}
logger.Debugf("Kube cert:\n%s", cert)
return strings.TrimSpace(cert), nil
}, "5m", "10s").
Should(exutil.Secure(o.Equal(caBundle)),
"%s does not contain the certificates stored in %s.", kubernetesKubeconfigPath, kubeAPIServerCM)
o.Expect(rfKubernetesKubecon).To(o.And(
HaveOctalPermissions("0600"),
HaveOwner("root"),
HaveGroup("root")),
"Wrong security attributes in %s", rfKubernetesKubecon)
logger.Infof("OK!\n")
exutil.By("Check kubelet kubeconfig file was correctly updated")
// Eventually the kubeconfig file should be updated with the new certificates stored in kube-apiserver-serve-ca
rfKubeletKubecon := NewRemoteFile(node, kubeletKubeconfigPath)
o.Eventually(func() (string, error) {
err := rfKubeletKubecon.Fetch()
if err != nil {
return "", err
}
cert, err := getCertsFromKubeconfig(rfKubeletKubecon.GetTextContent())
if err != nil {
return "", err
}
return strings.TrimSpace(cert), nil
}, "5m", "10s").
Should(exutil.Secure(o.Equal(caBundle)),
"%s does not contain the certificates stored in %s.", kubeletKubeconfigPath, kubeAPIServerCM)
o.Expect(rfKubeletKubecon).To(o.And(
HaveOctalPermissions("0600"),
HaveOwner("root"),
HaveGroup("root")),
"Wrong security attributes in %s", rfKubeletKubecon)
logger.Infof("OK!\n")
exutil.By("Check that kubelet was restarted")
o.Eventually(node.GetUnitActiveEnterTime, "6m", "20s").WithArguments("kubelet.service").Should(o.BeTemporally(">", startTime),
"Kubelet service was NOT restarted, but it should be")
logger.Infof("OK!\n")
exutil.By("Check that MCO pods are healthy")
o.Expect(waitForAllMCOPodsReady(oc.AsAdmin(), 10*time.Minute)).To(o.Succeed(),
"MCO pods are not Ready after cert rotation")
o.Eventually(mco, "5m", "20s").ShouldNot(BeDegraded(), "Error! %s is degraded:\n%s", mco, mco.PrettyString())
logger.Infof("OK!\n")
exutil.By("Check that all cluster operators are healthy")
checkAllOperatorsHealthy(oc.AsAdmin(), "20m", "30s")
logger.Infof("OK!\n")
})
g.It("Author:ptalgulk-NonHyperShiftHOST-NonPreRelease-Longduration-Critical-75222-[P2] tlSecurityProfile switch and check the expected tlsMinVersion and cipheres suite are seen in MCS,MSS and rbac-kube-proxy pod logs[Disruptive]", func() {
var (
apiServer = NewResource(oc.AsAdmin(), "apiserver", "cluster")
)
exutil.By("Verify for Intermediate TLS Profile")
csNameList := getCipherSuitesNameforSpecificVersion(VersionTLS12)
var csVersion12 []string
for i := range csNameList {
if !strings.Contains(csNameList[i], "_CBC_") {
csVersion12 = append(csVersion12, csNameList[i])
}
}
validateCorrectTLSProfileSecurity(oc, "", "VersionTLS12", csVersion12)
logger.Infof("OK!\n")
defer func(initialConfig string) {
exutil.By("Restore with previous apiserver value")
apiServer.SetSpec(initialConfig)
exutil.By("Check that all cluster operators are stable")
o.Expect(WaitForStableCluster(oc.AsAdmin(), "30s", "50m")).To(o.Succeed(), "Not all COs were ready after configuring the tls profile")
logger.Infof("Wait for MCC to get the leader lease")
o.Eventually(NewController(oc.AsAdmin()).HasAcquiredLease, "6m", "20s").Should(o.BeTrue(),
"The controller pod didn't acquire the lease properly.")
mMcp.waitForComplete()
wMcp.waitForComplete()
logger.Infof("OK!\n")
}(apiServer.GetSpecOrFail())
exutil.By("Patch the Custom tlsSecurityProfile")
o.Expect(apiServer.Patch("json",
`[{ "op": "add", "path": "/spec/tlsSecurityProfile", "value": {"type": "Custom","custom": {"ciphers": ["ECDHE-ECDSA-CHACHA20-POLY1305","ECDHE-RSA-CHACHA20-POLY1305", "ECDHE-RSA-AES128-GCM-SHA256", "ECDHE-ECDSA-AES128-GCM-SHA256" ],"minTLSVersion": "VersionTLS11"}}}]`)).To(o.Succeed(), "Error patching tlsSecurityProfile")
logger.Infof("OK!\n")
exutil.By("Check that all cluster operators are stable")
o.Expect(WaitForStableCluster(oc.AsAdmin(), "30s", "50m")).To(o.Succeed(), "Not all COs were ready after configuring the tls profile")
logger.Infof("Wait for MCC to get the leader lease")
o.Eventually(NewController(oc.AsAdmin()).HasAcquiredLease, "12m", "20s").Should(o.BeTrue(),
"The controller pod didn't acquire the lease properly.")
mMcp.waitForComplete()
wMcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Verify for Custom TLS Profile")
customCipherSuite := []string{"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"}
validateCorrectTLSProfileSecurity(oc, "Custom", "VersionTLS11", customCipherSuite)
logger.Infof("OK!\n")
exutil.By("Patch the Old tlsSecurityProfile")
o.Expect(apiServer.Patch("json",
`[{ "op": "add", "path": "/spec/tlsSecurityProfile", "value": {"type": "Old","old": {}}}]`)).To(o.Succeed(), "Error patching tlsSecurityProfile")
logger.Infof("OK!\n")
exutil.By("Check that all cluster operators are stable")
o.Expect(WaitForStableCluster(oc.AsAdmin(), "30s", "50m")).To(o.Succeed(), "Not all COs were ready after configuring the tls profile")
logger.Infof("Wait for MCC to get the leader lease")
o.Eventually(NewController(oc.AsAdmin()).HasAcquiredLease, "12m", "20s").Should(o.BeTrue(),
"The controller pod didn't acquire the lease properly.")
mMcp.waitForComplete()
wMcp.waitForComplete()
exutil.By("Verify for Old TLS Profile")
csNameList = getCipherSuitesNameforSpecificVersion(VersionTLS10)
validateCorrectTLSProfileSecurity(oc, "Old", "VersionTLS10", csNameList)
logger.Infof("OK!\n")
// For now Modern Profile is not supported
match := `Unsupported value: "Modern"`
exutil.By("Patch the Modern tlsSecurityProfile")
tlsPatch := apiServer.Patch("json",
`[{ "op": "add", "path": "/spec/tlsSecurityProfile", "value": {"type": "Modern"}}]`)
o.Expect(tlsPatch.(*exutil.ExitError).StdErr).To(o.ContainSubstring(match))
})
g.It("Author:ptalgulk-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-75543-tlsSecurity setting is also propagated on node in kubelet.conf [Disruptive]", func() {
var (
node = wMcp.GetSortedNodesOrFail()[0]
apiServer = NewResource(oc.AsAdmin(), "apiserver", "cluster")
kcName = "tc-75543-set-kubelet-custom-tls-profile"
kcTemplate = generateTemplateAbsolutePath("custom-tls-profile-kubelet-config.yaml")
customCipherSuite = []string{"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"}
)
csNameList := getCipherSuitesNameforSpecificVersion(VersionTLS12)
var csVersion12 []string
for _, name := range csNameList {
if !strings.Contains(name, "_CBC_") {
csVersion12 = append(csVersion12, name)
}
}
exutil.By("Verify for Intermediate TLS Profile in kubeletConfig")
validateCorrectTLSProfileSecurityInKubeletConfig(node, "VersionTLS12", csVersion12)
exutil.By("Verify for Intermediate TLS Profile pod logs")
validateCorrectTLSProfileSecurity(oc, "", "VersionTLS12", csVersion12)
defer func(initialConfig string) {
exutil.By("Restore with previous apiserver value")
apiServer.SetSpec(initialConfig)
exutil.By("Check that all cluster operators are stable")
o.Expect(WaitForStableCluster(oc.AsAdmin(), "30s", "50m")).To(o.Succeed(), "Not all COs were ready after configuring the tls profile")
logger.Infof("Wait for MCC to get the leader lease")
o.Eventually(NewController(oc.AsAdmin()).HasAcquiredLease, "6m", "20s").Should(o.BeTrue(),
"The controller pod didn't acquire the lease properly.")
mMcp.waitForComplete()
wMcp.waitForComplete()
logger.Infof("OK!\n")
}(apiServer.GetSpecOrFail())
exutil.By("Patch the Old tlsSecurityProfile")
o.Expect(apiServer.Patch("json",
`[{ "op": "add", "path": "/spec/tlsSecurityProfile", "value": {"type": "Old","old": {}}}]`)).To(o.Succeed(), "Error patching tlsSecurityProfile")
logger.Infof("OK!\n")
exutil.By("Check that all cluster operators are stable")
o.Expect(WaitForStableCluster(oc.AsAdmin(), "30s", "50m")).To(o.Succeed(), "Not all COs were ready after configuring the tls profile")
logger.Infof("Wait for MCC to get the leader lease")
o.Eventually(NewController(oc.AsAdmin()).HasAcquiredLease, "12m", "20s").Should(o.BeTrue(),
"The controller pod didn't acquire the lease properly.")
mMcp.waitForComplete()
wMcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Verify for Old TLS Profile in kubeletConfig")
csVersion10 := getCipherSuitesNameforSpecificVersion(VersionTLS10)
validateCorrectTLSProfileSecurityInKubeletConfig(node, "VersionTLS10", csVersion10)
exutil.By("Verify for Old TLS Profile in pod logs")
validateCorrectTLSProfileSecurity(oc, "Old", "VersionTLS10", csVersion10)
exutil.By("Create Kubeletconfig to configure a custom tlsSecurityProfile")
kc := NewKubeletConfig(oc.AsAdmin(), kcName, kcTemplate)
defer kc.Delete()
kc.create()
logger.Infof("KubeletConfig was created. Waiting for success.")
kc.waitUntilSuccess("5m")
logger.Infof("OK!\n")
exutil.By("Wait for Worker MachineConfigPool to be updated")
wMcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Verify for Custom TLS Profile in kubeletConfig")
validateCorrectTLSProfileSecurityInKubeletConfig(node, "VersionTLS11", customCipherSuite)
exutil.By("Patch the Intermediate tlsSecurityProfile to check kubeletconfig settings are not changed")
o.Expect(apiServer.Patch("json",
`[{ "op": "add", "path": "/spec/tlsSecurityProfile", "value": {"type": "Intermediate","intermediate": {}}}]`)).To(o.Succeed(), "Error patching tlsSecurityProfile")
logger.Infof("OK!\n")
exutil.By("Check that all cluster operators are stable")
o.Expect(WaitForStableCluster(oc.AsAdmin(), "30s", "50m")).To(o.Succeed(), "Not all COs were ready after configuring the tls profile")
logger.Infof("Wait for MCC to get the leader lease")
o.Eventually(NewController(oc.AsAdmin()).HasAcquiredLease, "12m", "20s").Should(o.BeTrue(),
"The controller pod didn't acquire the lease properly.")
mMcp.waitForComplete()
wMcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Verify for Intermediate TLS Profile pod logs")
validateCorrectTLSProfileSecurity(oc, "Intermediate", "VersionTLS12", csVersion12)
exutil.By("Verify for Custom TLS Profile not changed in kubeletConfig")
validateCorrectTLSProfileSecurityInKubeletConfig(node, "VersionTLS11", customCipherSuite)
exutil.By("Delete create kubeletConfig template")
kc.DeleteOrFail()
o.Expect(kc).NotTo(Exist())
wMcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("To check the kubeletConfig to have same tls setting as of API server")
validateCorrectTLSProfileSecurityInKubeletConfig(node, "VersionTLS12", csVersion12)
logger.Infof("OK!\n")
})
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Critical-76587-[P1] MCS port should not expose weak ciphers to external client from master node IP [Disruptive]", func() {
var (
node = mcp.GetSortedNodesOrFail()[0]
port = 22623
insecureCiphers = []string{"TLS_RSA_WITH_AES_128_GCM_SHA256", "TLS_RSA_WITH_AES_256_GCM_SHA384"}
)
exutil.By("Remove iptable rules")
logger.Infof("Remove the IPV4 iptables rules that block the ignition config")
removedRules, err := node.RemoveIPTablesRulesByRegexp(fmt.Sprintf("%d", port))
defer node.ExecIPTables(removedRules)
o.Expect(err).NotTo(o.HaveOccurred(), "Error removing the IPv4 iptables rules for port %s in node %s", port, node.GetName())
logger.Infof("Remove the IPV6 ip6tables rules that block the ignition config")
removed6Rules, err := node.RemoveIP6TablesRulesByRegexp(fmt.Sprintf("%d", port))
o.Expect(err).NotTo(o.HaveOccurred(), "Error removing the IPv6 iptables rules for port %s in node %s", port, node.GetName())
defer node.ExecIP6Tables(removed6Rules)
logger.Infof("OK!\n")
internalAPIServerURI, err := GetAPIServerInternalURI(mcp.oc)
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the internal apiserver URL")
exutil.By("Check that no weak cipher is exposed")
url := fmt.Sprintf("%s:%d", internalAPIServerURI, port)
cipherOutput, cipherErr := node.DebugNodeWithOptions([]string{"--image=" + TestSSLImage, "-n", MachineConfigNamespace}, "testssl.sh", "--color", "0", url)
logger.Infof("test ssh script output:\n %s", cipherOutput)
o.Expect(cipherErr).NotTo(o.HaveOccurred())
for _, insecureCipher := range insecureCiphers {
logger.Infof("Verify %s", insecureCipher)
o.Expect(cipherOutput).NotTo(o.ContainSubstring(insecureCipher),
"MCO is exposing weak ciphers in %s", internalAPIServerURI)
logger.Infof("OK")
}
logger.Infof("Verify SWEET32")
o.Expect(cipherOutput).To(o.MatchRegexp("SWEET32 .*"+regexp.QuoteMeta("not vulnerable (OK)")),
"%s is vulnerable to SWEET32", internalAPIServerURI)
logger.Infof("OK!\n")
})
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Medium-75521-[P1] Log details for malformed certificates. No infinite loop [Disruptive]", func() {
var (
configCM = NewConfigMap(oc.AsAdmin(), "openshift-config", "cloud-provider-config")
bundleKey = "ca-bundle.pem"
malformedCertFilePath = generateTemplateAbsolutePath("malformedcert.pem")
mcc = NewController(oc.AsAdmin())
expectedErrorMsg = "Malformed certificate 'CloudProviderCAData' detected and is not syncing. Error: x509: malformed certificate, Cert data: -----BEGIN CERTIFICATE---"
restoreFunc func() error
)
if !configCM.Exists() {
g.Skip(fmt.Sprintf("%s does not exist, we cannot reconfigure it", configCM))
}
currentBundle, hasKey, err := configCM.HasKey(bundleKey)
o.Expect(err).NotTo(o.HaveOccurred(), "Error checking if key %s exists in %s", bundleKey, configCM)
if hasKey {
restoreFunc = func() error {
logger.Infof("Restoring initial data in %s", configCM)
configCM.oc.NotShowInfo()
return configCM.SetData(bundleKey + "=" + currentBundle)
}
} else {
restoreFunc = func() error {
return configCM.RemoveDataKey(bundleKey)
}
}
defer restoreFunc()
exutil.By("Configure a malformed certificate")
o.Expect(
configCM.SetData("--from-file="+bundleKey+"="+malformedCertFilePath),
).To(o.Succeed(), "Error configuring the %s value in %s", bundleKey, malformedCertFilePath)
logger.Infof("OK!\n")
exutil.By("Check that the error is correctly reported")
o.Eventually(mcc.GetLogs, "5m", "20s").Should(o.ContainSubstring(expectedErrorMsg),
"The malformed certificate is not correctly reported in the controller logs")
logger.Infof("OK!\n")
exutil.By("Restore the initial certificate values")
o.Expect(restoreFunc()).To(o.Succeed(),
"Error restoring the initial certificate values in %s", configCM)
logger.Infof("OK!\n")
exutil.By("Check that no more errors are reported")
currentLogs, err := mcc.GetLogs()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting MCC logs")
o.Eventually(func() (string, error) {
// we return the new recently printed logs only
var diffLogs string
newLogs, err := mcc.GetLogs()
if err != nil {
return "", err
}
diffLogs = strings.ReplaceAll(newLogs, currentLogs, "")
currentLogs = newLogs
logger.Infof("Checking diff logs: %s", diffLogs)
return diffLogs, nil
}, "5m", "20s").ShouldNot(o.ContainSubstring(expectedErrorMsg),
"The certificate was fixed but the controller is still reporting an error")
o.Consistently(func() (string, error) {
// we return the new recently printed logs only
var diffLogs string
newLogs, err := mcc.GetLogs()
if err != nil {
return "", err
}
diffLogs = strings.ReplaceAll(newLogs, currentLogs, "")
currentLogs = newLogs
logger.Infof("Checking diff logs: %s", diffLogs)
return diffLogs, nil
}, "1m", "20s").ShouldNot(o.ContainSubstring(expectedErrorMsg),
"The certificate was fixed but the controller is still reporting an error")
logger.Infof("OK!\n")
})
})
// EventuallyImageRegistryCertificateExistsInNode fails the test if the image registry certificate file does not exist in the node after the time specified as parameters
func EventuallyImageRegistryCertificateExistsInNode(certFileName, certContent string, node Node, timeout, poll string) {
certPath := filepath.Join(ImageRegistryCertificatesDir, certFileName, ImageRegistryCertificatesFileName)
EventuallyFileExistsInNode(certPath, certContent, node, timeout, poll)
}
// EventuallyFileExistsInNode fails the test if the file does not exist in the node after the time specified as parameters
func EventuallyFileExistsInNode(filePath, expectedContent string, node Node, timeout, poll string) {
logger.Infof("Checking file %s in node %s", filePath, node.GetName())
rfCert := NewRemoteFile(node, filePath)
o.Eventually(func(gm o.Gomega) { // Passing o.Gomega as parameter we can use assertions inside the Eventually function without breaking the retries.
gm.Expect(rfCert.Fetch()).To(o.Succeed(),
"Cannot read the certificate file %s in node:%s ", rfCert.fullPath, node.GetName())
gm.Expect(rfCert.GetTextContent()).To(exutil.Secure(o.Equal(expectedContent)),
"the certificate stored in file %s does not match the expected value", rfCert.fullPath)
}, timeout, poll).
Should(o.Succeed(),
"The file %s in node %s does not contain the expected certificate.", rfCert.GetFullPath(), node.GetName())
}
// BundleFileIsCATrusted checks that the provided bundle file is included in the file /etc/pki/ca-trust/extracted/pem/objsign-ca-bundle.pem, which means that it is ca-trusted
func BundleFileIsCATrusted(bundleFile string, node Node) (bool, error) {
var (
objsignCABundlePemPath = "/etc/pki/ca-trust/extracted/pem/objsign-ca-bundle.pem"
objsignCABundleRemote = NewRemoteFile(node, objsignCABundlePemPath)
bundleRemote = NewRemoteFile(node, bundleFile)
)
if !bundleRemote.Exists() {
return false, fmt.Errorf("File %s does not exist", bundleRemote.GetFullPath())
}
if !objsignCABundleRemote.Exists() {
return false, fmt.Errorf("File %s does not exist", objsignCABundleRemote.GetFullPath())
}
err := bundleRemote.Fetch()
if err != nil {
return false, err
}
err = objsignCABundleRemote.Fetch()
if err != nil {
return false, err
}
bundleCerts, err := splitBundleCertificates([]byte(bundleRemote.GetTextContent()))
if err != nil {
return false, err
}
objsignCABundleCerts, err := splitBundleCertificates([]byte(objsignCABundleRemote.GetTextContent()))
if err != nil {
return false, err
}
// The new certificates should be included in the /etc/pki/ca-trust/extracted/pem/objsign-ca-bundle.pem file when we execute the update-ca-trust command
for _, bundleCert := range bundleCerts {
found := false
for _, objsignCert := range objsignCABundleCerts {
if bundleCert.Equal(objsignCert) {
found = true
break
}
}
if !found {
return false, nil
}
}
return true, nil
}
// splitBundleCertificates reads a pem bundle and returns a slice with all the certificates contained in this pem bundle
func splitBundleCertificates(pemBundle []byte) ([]*x509.Certificate, error) {
certsList := []*x509.Certificate{}
for {
block, rest := pem.Decode(pemBundle)
if block == nil {
return nil, fmt.Errorf("failed to parse certificate PEM:\n%s", string(pemBundle))
}
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return nil, err
}
certsList = append(certsList, cert)
pemBundle = rest
if len(rest) == 0 {
break
}
}
return certsList, nil
}
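// The helper below is an illustrative sketch only and is NOT part of the original test suite: it
// shows how splitBundleCertificates can be reused to perform the same containment check that
// BundleFileIsCATrusted runs against the extracted objsign-ca-bundle.pem, but for two in-memory
// PEM bundles. The function name certsArePresentInPemBundle is hypothetical.
func certsArePresentInPemBundle(certsPem, bundlePem []byte) (bool, error) {
// Parse both PEM inputs into x509 certificates
certs, err := splitBundleCertificates(certsPem)
if err != nil {
return false, err
}
bundleCerts, err := splitBundleCertificates(bundlePem)
if err != nil {
return false, err
}
// Every certificate in certsPem must be present in bundlePem
for _, cert := range certs {
found := false
for _, bundleCert := range bundleCerts {
if cert.Equal(bundleCert) {
found = true
break
}
}
if !found {
return false, nil
}
}
return true, nil
}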
// getCipherSuitesNameforSpecificVersion returns the names of cipher suite for the provided version
func getCipherSuitesNameforSpecificVersion(version uint16) []string {
cipherSuites := getCipherSuitesForVersion(version)
cipherSuiteNames := []string{}
for _, cipherSuite := range cipherSuites {
cipherSuiteNames = append(cipherSuiteNames, cipherSuite.Name)
}
return cipherSuiteNames
}
// getCipherSuitesForVersion returns the cipher suite list along with name,ID, security issues for provided version
func getCipherSuitesForVersion(version uint16) []*tls.CipherSuite {
var suites []*tls.CipherSuite
for _, cs := range tls.CipherSuites() {
for _, v := range cs.SupportedVersions {
if v == version {
suites = append(suites, cs)
break
}
}
}
return suites
}
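// Illustrative sketch only, NOT part of the original test suite: the helpers above can be combined
// to build the cipher-suite list that the tests expect for the Intermediate TLS profile, i.e. the
// Go TLS 1.2 cipher suites with the CBC-mode suites filtered out. The function name
// intermediateProfileCipherSuiteNames is hypothetical.
func intermediateProfileCipherSuiteNames() []string {
names := []string{}
for _, name := range getCipherSuitesNameforSpecificVersion(tls.VersionTLS12) {
// The Intermediate profile excludes CBC-mode ciphers, so skip them
if !strings.Contains(name, "_CBC_") {
names = append(names, name)
}
}
return names
}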
// validateCorrectTLSProfileSecurity helps to check the valid tls-min-version and tls-cipher-suite
func validateCorrectTLSProfileSecurity(oc *exutil.CLI, tlsSecurityProfile, tlsMinVersionStr string, cipherSuite []string) {
var (
containerArgsPath = `{.spec.containers[*].args[*]}`
tlsProfileTypePath = `{.spec.tlsSecurityProfile.type}`
apiServer = NewResource(oc.AsAdmin(), "apiserver", "cluster")
)
exutil.By("Get the kube-rbac-proxy, MCC, MCS pods")
getKubeProxyPod, err := getAllKubeProxyPod(oc.AsAdmin(), MachineConfigNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
kubeproxy := NewNamespacedResource(oc.AsAdmin(), "pod", MachineConfigNamespace, getKubeProxyPod[0])
logger.Infof("%s\n", kubeproxy.GetOrFail(containerArgsPath))
logger.Infof("OK!\n")
mccPodName, err := getMachineConfigControllerPod(oc.AsAdmin())
o.Expect(err).NotTo(o.HaveOccurred())
mcc := NewNamespacedResource(oc.AsAdmin(), "pod", MachineConfigNamespace, mccPodName)
logger.Infof("%s\n", mcc.GetOrFail(containerArgsPath))
logger.Infof("OK!\n")
mcspod, err := GetMCSPodNames(oc.AsAdmin())
o.Expect(err).NotTo(o.HaveOccurred())
mcsLogs, err := exutil.GetSpecificPodLogs(oc, MachineConfigNamespace, MachineConfigServer, mcspod[0], "")
o.Expect(err).NotTo(o.HaveOccurred())
logger.Infof("%s\n", mcsLogs)
logger.Infof("OK!\n")
logger.Infof("%s\n", apiServer.GetOrFail(tlsProfileTypePath))
logger.Infof("OK!\n")
o.Expect(apiServer.GetOrFail(tlsProfileTypePath)).To(o.ContainSubstring(tlsSecurityProfile), "The %s tlsSecurityProfile is not applied properly", tlsSecurityProfile)
logger.Infof("OK!\n")
exutil.By(fmt.Sprintf("To check the valid tls-min-version for %s", tlsSecurityProfile))
o.Expect(kubeproxy.GetOrFail(containerArgsPath)).To(o.ContainSubstring("--tls-min-version=%s", tlsMinVersionStr), "Error getting required tls-min-version for given tlsSecurityProfile in %s pod", getKubeProxyPod[0])
o.Expect(mcc.GetOrFail(containerArgsPath)).To(o.ContainSubstring("--tls-min-version=%s", tlsMinVersionStr), "Error getting required tls-min-version for tlsSecurityProfile in %s pod", mccPodName)
o.Expect(exutil.GetSpecificPodLogs(oc, MachineConfigNamespace, MachineConfigServer, mcspod[0], "")).To(o.ContainSubstring(tlsMinVersionStr), "Error getting required tls-min-version for %s pod", mcspod[0])
logger.Infof("OK!\n")
exutil.By(fmt.Sprintf("To check the valid tls-cipher-suite for %s", tlsSecurityProfile))
for i := range cipherSuite {
o.Expect(kubeproxy.GetOrFail(containerArgsPath)).To(o.ContainSubstring(cipherSuite[i]), "Error getting %s cipher suite for given tlsSecurityProfile of %s pod", cipherSuite[i], getKubeProxyPod[0])
o.Expect(mcc.GetOrFail(containerArgsPath)).To(o.ContainSubstring(cipherSuite[i]), "Error getting %s cipher suite for given tlsSecurityProfile of %s pod", cipherSuite[i], mccPodName)
o.Expect(exutil.GetSpecificPodLogs(oc, MachineConfigNamespace, MachineConfigServer, mcspod[0], "")).To(o.ContainSubstring(cipherSuite[i]), "Error getting %s cipher suite for given tlsSecurityProfile of %s pod", cipherSuite[i], mcspod[0])
}
logger.Infof("OK!\n")
}
func validateCorrectTLSProfileSecurityInKubeletConfig(node Node, tlsMinVersion string, cipherSuite []string) {
stdout, err := node.DebugNodeWithChroot("cat", "/etc/kubernetes/kubelet.conf")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("To check the kubeletConfig to have same tls setting as of API server")
o.Expect(stdout).To(o.ContainSubstring("tlsMinVersion: %s", tlsMinVersion), "Error %s tlsMinVersion is not updated in kubelet config", tlsMinVersion)
for _, csname := range cipherSuite {
o.Expect(stdout).To(o.ContainSubstring(csname), "Error %s cipher suite is not updated in kubelet config", csname)
}
logger.Infof("OK!\n")
}
|
package mco
| ||||
function
|
openshift/openshift-tests-private
|
fb63f860-13f7-4d3b-958c-1091ceec8cd6
|
EventuallyImageRegistryCertificateExistsInNode
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_security.go
|
func EventuallyImageRegistryCertificateExistsInNode(certFileName, certContent string, node Node, timeout, poll string) {
certPath := filepath.Join(ImageRegistryCertificatesDir, certFileName, ImageRegistryCertificatesFileName)
EventuallyFileExistsInNode(certPath, certContent, node, timeout, poll)
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
db25e8c5-6291-4145-aa71-c190b424e89c
|
EventuallyFileExistsInNode
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_security.go
|
func EventuallyFileExistsInNode(filePath, expectedContent string, node Node, timeout, poll string) {
logger.Infof("Checking file %s in node %s", filePath, node.GetName())
rfCert := NewRemoteFile(node, filePath)
o.Eventually(func(gm o.Gomega) { // Passing o.Gomega as parameter we can use assertions inside the Eventually function without breaking the retries.
gm.Expect(rfCert.Fetch()).To(o.Succeed(),
"Cannot read the certificate file %s in node:%s ", rfCert.fullPath, node.GetName())
gm.Expect(rfCert.GetTextContent()).To(exutil.Secure(o.Equal(expectedContent)),
"the certificate stored in file %s does not match the expected value", rfCert.fullPath)
}, timeout, poll).
Should(o.Succeed(),
"The file %s in node %s does not contain the expected certificate.", rfCert.GetFullPath(), node.GetName())
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
925e18fa-1e64-4f82-99cd-6d133739bbc6
|
BundleFileIsCATrusted
|
['"encoding/pem"', '"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_security.go
|
func BundleFileIsCATrusted(bundleFile string, node Node) (bool, error) {
var (
objsignCABundlePemPath = "/etc/pki/ca-trust/extracted/pem/objsign-ca-bundle.pem"
objsignCABundleRemote = NewRemoteFile(node, objsignCABundlePemPath)
bundleRemote = NewRemoteFile(node, bundleFile)
)
if !bundleRemote.Exists() {
return false, fmt.Errorf("File %s does not exist", bundleRemote.GetFullPath())
}
if !objsignCABundleRemote.Exists() {
return false, fmt.Errorf("File %s does not exist", objsignCABundleRemote.GetFullPath())
}
err := bundleRemote.Fetch()
if err != nil {
return false, err
}
err = objsignCABundleRemote.Fetch()
if err != nil {
return false, err
}
bundleCerts, err := splitBundleCertificates([]byte(bundleRemote.GetTextContent()))
if err != nil {
return false, err
}
objsignCABundleCerts, err := splitBundleCertificates([]byte(objsignCABundleRemote.GetTextContent()))
if err != nil {
return false, err
}
// The new certificates should be included in the /etc/pki/ca-trust/extracted/pem/objsign-ca-bundle.pem file when we execute the update-ca-trust command
for _, bundleCert := range bundleCerts {
found := false
for _, objsignCert := range objsignCABundleCerts {
if bundleCert.Equal(objsignCert) {
found = true
break
}
}
if !found {
return false, nil
}
}
return true, nil
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
253c2963-1243-412b-9a73-a984e0d35cb4
|
splitBundleCertificates
|
['"crypto/x509"', '"encoding/pem"', '"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_security.go
|
func splitBundleCertificates(pemBundle []byte) ([]*x509.Certificate, error) {
certsList := []*x509.Certificate{}
for {
block, rest := pem.Decode(pemBundle)
if block == nil {
return nil, fmt.Errorf("failed to parse certificate PEM:\n%s", string(pemBundle))
}
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return nil, err
}
certsList = append(certsList, cert)
pemBundle = rest
if len(rest) == 0 {
break
}
}
return certsList, nil
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
d0437c77-7f3d-4051-a14c-c167ed2e9ea3
|
getCipherSuitesNameforSpecificVersion
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_security.go
|
func getCipherSuitesNameforSpecificVersion(version uint16) []string {
cipherSuites := getCipherSuitesForVersion(version)
cipherSuiteNames := []string{}
for _, cipherSuite := range cipherSuites {
cipherSuiteNames = append(cipherSuiteNames, cipherSuite.Name)
}
return cipherSuiteNames
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
241afebf-39bd-44b0-82a1-ce59475591e4
|
getCipherSuitesForVersion
|
['"crypto/tls"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_security.go
|
func getCipherSuitesForVersion(version uint16) []*tls.CipherSuite {
var suites []*tls.CipherSuite
for _, cs := range tls.CipherSuites() {
for _, v := range cs.SupportedVersions {
if v == version {
suites = append(suites, cs)
break
}
}
}
return suites
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
7b9195dd-4e7b-4887-ab18-d3ca96fb55c0
|
validateCorrectTLSProfileSecurity
|
['"crypto/tls"', '"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_security.go
|
func validateCorrectTLSProfileSecurity(oc *exutil.CLI, tlsSecurityProfile, tlsMinVersionStr string, cipherSuite []string) {
var (
containerArgsPath = `{.spec.containers[*].args[*]}`
tlsProfileTypePath = `{.spec.tlsSecurityProfile.type}`
apiServer = NewResource(oc.AsAdmin(), "apiserver", "cluster")
)
exutil.By("Get the kube-rbac-proxy, MCC, MCS pods")
getKubeProxyPod, err := getAllKubeProxyPod(oc.AsAdmin(), MachineConfigNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
kubeproxy := NewNamespacedResource(oc.AsAdmin(), "pod", MachineConfigNamespace, getKubeProxyPod[0])
logger.Infof("%s\n", kubeproxy.GetOrFail(containerArgsPath))
logger.Infof("OK!\n")
mccPodName, err := getMachineConfigControllerPod(oc.AsAdmin())
o.Expect(err).NotTo(o.HaveOccurred())
mcc := NewNamespacedResource(oc.AsAdmin(), "pod", MachineConfigNamespace, mccPodName)
logger.Infof("%s\n", mcc.GetOrFail(containerArgsPath))
logger.Infof("OK!\n")
mcspod, err := GetMCSPodNames(oc.AsAdmin())
o.Expect(err).NotTo(o.HaveOccurred())
mcsLogs, err := exutil.GetSpecificPodLogs(oc, MachineConfigNamespace, MachineConfigServer, mcspod[0], "")
o.Expect(err).NotTo(o.HaveOccurred())
logger.Infof("%s\n", mcsLogs)
logger.Infof("OK!\n")
logger.Infof("%s\n", apiServer.GetOrFail(tlsProfileTypePath))
logger.Infof("OK!\n")
o.Expect(apiServer.GetOrFail(tlsProfileTypePath)).To(o.ContainSubstring(tlsSecurityProfile), "The %s tlsSecurityProfile is not applied properly", tlsSecurityProfile)
logger.Infof("OK!\n")
exutil.By(fmt.Sprintf("To check the valid tls-min-version for %s", tlsSecurityProfile))
o.Expect(kubeproxy.GetOrFail(containerArgsPath)).To(o.ContainSubstring("--tls-min-version=%s", tlsMinVersionStr), "Error getting required tls-min-version for given tlsSecurityProfile in %s pod", getKubeProxyPod[0])
o.Expect(mcc.GetOrFail(containerArgsPath)).To(o.ContainSubstring("--tls-min-version=%s", tlsMinVersionStr), "Error getting required tls-min-version for tlsSecurityProfile in %s pod", mccPodName)
o.Expect(exutil.GetSpecificPodLogs(oc, MachineConfigNamespace, MachineConfigServer, mcspod[0], "")).To(o.ContainSubstring(tlsMinVersionStr), "Error getting required tls-min-version for %s pod", mcspod[0])
logger.Infof("OK!\n")
exutil.By(fmt.Sprintf("To check the valid tls-cipher-suite for %s", tlsSecurityProfile))
for i := range cipherSuite {
o.Expect(kubeproxy.GetOrFail(containerArgsPath)).To(o.ContainSubstring(cipherSuite[i]), "Error getting %s cipher suite for given tlsSecurityProfile of %s pod", cipherSuite[i], getKubeProxyPod[0])
o.Expect(mcc.GetOrFail(containerArgsPath)).To(o.ContainSubstring(cipherSuite[i]), "Error getting %s cipher suite for given tlsSecurityProfile of %s pod", cipherSuite[i], mccPodName)
o.Expect(exutil.GetSpecificPodLogs(oc, MachineConfigNamespace, MachineConfigServer, mcspod[0], "")).To(o.ContainSubstring(cipherSuite[i]), "Error getting %s cipher suite for given tlsSecurityProfile of %s pod", cipherSuite[i], mcspod[0])
}
logger.Infof("OK!\n")
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
8e3dd5a7-b597-4edb-bfc6-d26a765aab7d
|
validateCorrectTLSProfileSecurityInKubeletConfig
|
['"crypto/tls"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_security.go
|
func validateCorrectTLSProfileSecurityInKubeletConfig(node Node, tlsMinVersion string, cipherSuite []string) {
stdout, err := node.DebugNodeWithChroot("cat", "/etc/kubernetes/kubelet.conf")
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("To check the kubeletConfig to have same tls setting as of API server")
o.Expect(stdout).To(o.ContainSubstring("tlsMinVersion: %s", tlsMinVersion), "Error %s tlsMinVersion is not updated in kubelet config", tlsMinVersion)
for _, csname := range cipherSuite {
o.Expect(stdout).To(o.ContainSubstring(csname), "Error %s cipher suite is not updated in kubelet config", csname)
}
logger.Infof("OK!\n")
}
|
mco
| ||||
test case
|
openshift/openshift-tests-private
|
453ddee1-6869-4c0d-a92c-84e5bdacd3ac
|
Author:sregidor-NonHyperShiftHOST-Medium-66048-[P1][OnCLayer] Check image registry user bundle certificate [Disruptive]
|
['"encoding/pem"', '"fmt"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_security.go
|
g.It("Author:sregidor-NonHyperShiftHOST-Medium-66048-[P1][OnCLayer] Check image registry user bundle certificate [Disruptive]", func() {
if !IsCapabilityEnabled(oc.AsAdmin(), "ImageRegistry") {
g.Skip("ImageRegistry is not installed, skip this test")
}
var (
mergedTrustedImageRegistryCACM = NewConfigMap(oc.AsAdmin(), "openshift-config-managed", "merged-trusted-image-registry-ca")
imageConfig = NewResource(oc.AsAdmin(), "image.config.openshift.io", "cluster")
certFileName = "caKey.pem"
cmName = "cm-test-ca"
)
exutil.By("Get current image.config spec")
initImageConfigSpec := imageConfig.GetOrFail(`{.spec}`)
defer func() {
logger.Infof("Restore original image.config spec: %s", initImageConfigSpec)
_ = imageConfig.Patch("json", `[{ "op": "add", "path": "/spec", "value": `+initImageConfigSpec+`}]`)
}()
initialCMCreationTime := mergedTrustedImageRegistryCACM.GetOrFail(`{.metadata.creationTimestamp}`)
logger.Infof("OK!\n")
exutil.By("Add new additionalTrustedCA to the image.config resource")
logger.Infof("Creating new config map with a new CA")
additionalTrustedCM, err := CreateConfigMapWithRandomCert(oc.AsAdmin(), "openshift-config", cmName, certFileName)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error creating a configmap with a CA")
defer additionalTrustedCM.Delete()
newCertificate := additionalTrustedCM.GetDataValueOrFail(certFileName)
logger.Infof("Configure the image.config resource to use the new configmap")
o.Expect(imageConfig.Patch("merge", fmt.Sprintf(`{"spec": {"additionalTrustedCA": {"name": "%s"}}}`, cmName))).To(
o.Succeed(),
"Error setting the new image.config spec")
logger.Infof("OK!\n")
exutil.By("Check that the ControllerConfig has been properly synced")
o.Eventually(cc.GetImageRegistryBundleUserDataByFileName,
"3m", "20s").WithArguments(certFileName).Should(
exutil.Secure(o.Equal(newCertificate)),
"The new certificate was not properly added to the controller config imageRegistryBundleUserData")
usrDataInfo, err := GetCertificatesInfoFromPemBundle(certFileName, []byte(newCertificate))
o.Expect(err).NotTo(o.HaveOccurred(),
"Error extracting certificate info from the new additional trusted CA")
o.Expect(cc.GetCertificatesInfoByBundleFileName(certFileName)).To(
o.Equal(usrDataInfo),
"The information reported in the ControllerConfig for bundle file %s is wrong", certFileName)
logger.Infof("OK!\n")
exutil.By("Check that the merged-trusted-image-registry-ca configmap has been properly synced")
o.Expect(mergedTrustedImageRegistryCACM.GetDataValueOrFail(certFileName)).To(
exutil.Secure(o.Equal(newCertificate)),
"The configmap -n openshift-config-managed merged-trusted-image-registry-ca was not properly synced")
o.Expect(mergedTrustedImageRegistryCACM.Get(`{.metadata.creationTimestamp}`)).To(
o.Equal(initialCMCreationTime),
"The %s resource was not patched! it was recreated! The configmap should be patched since https://issues.redhat.com/browse/OCPBUGS-18800")
logger.Infof("OK!\n")
// We verify that all nodes in the pools have the new certificate (be aware that windows nodes do not belong to any pool, we are skipping them)
for _, node := range append(wMcp.GetNodesOrFail(), mMcp.GetNodesOrFail()...) {
exutil.By(fmt.Sprintf("Check that the certificate was correctly deployed in node %s", node.GetName()))
EventuallyImageRegistryCertificateExistsInNode(certFileName, newCertificate, node, "5m", "30s")
logger.Infof("OK!\n")
}
exutil.By("Configure an empty image.config spec")
o.Expect(imageConfig.Patch("json", `[{ "op": "add", "path": "/spec", "value": {}}]`)).To(
o.Succeed(),
"Error configuring an empty image.config spec")
logger.Infof("OK!\n")
exutil.By("Check that the ControllerConfig was properly synced")
o.Eventually(cc.GetImageRegistryBundleUserData, "45s", "20s").ShouldNot(
exutil.Secure(o.HaveKey(certFileName)),
"The new certificate was not properly removed from the ControllerConfig imageRegistryBundleUserData")
o.Expect(cc.GetCertificatesInfoByBundleFileName(certFileName)).To(
exutil.Secure(o.BeEmpty()),
"The information reported in the ControllerConfig for bundle file %s was not removed", certFileName)
logger.Infof("OK!\n")
exutil.By("Check that the merged-trusted-image-registry-ca configmap has been properly synced")
o.Expect(mergedTrustedImageRegistryCACM.GetDataMap()).NotTo(
exutil.Secure(o.HaveKey(newCertificate)),
"The certificate was not removed from the configmap -n openshift-config-managed merged-trusted-image-registry-ca")
o.Expect(mergedTrustedImageRegistryCACM.Get(`{.metadata.creationTimestamp}`)).To(
o.Equal(initialCMCreationTime),
"The %s resource was not patched! it was recreated! The configmap should be patched since https://issues.redhat.com/browse/OCPBUGS-18800")
logger.Infof("OK!\n")
// We verify that the certificate was removed from all nodes in the pools (be aware that windows nodes do not belong to any pool, we are skipping them)
for _, node := range append(wMcp.GetNodesOrFail(), mMcp.GetNodesOrFail()...) {
exutil.By(fmt.Sprintf("Check that the certificate was correctly removed from node %s", node.GetName()))
certPath := filepath.Join(ImageRegistryCertificatesDir, certFileName, ImageRegistryCertificatesFileName)
rfCert := NewRemoteFile(node, certPath)
logger.Infof("Checking certificate file %s", certPath)
o.Eventually(rfCert.Exists, "5m", "20s").Should(
o.BeFalse(),
"The certificate %s was not removed from the node %s. But it should have been removed after the image.config reconfiguration",
certPath, node.GetName())
logger.Infof("OK!\n")
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
0bb805b9-6859-4dfa-814b-064b98e30ae9
|
Author:sregidor-NonHyperShiftHOST-High-67660-[P2][OnCLayer] MCS generates ignition configs with certs [Disruptive]
|
['"encoding/pem"', '"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_security.go
|
g.It("Author:sregidor-NonHyperShiftHOST-High-67660-[P2][OnCLayer] MCS generates ignition configs with certs [Disruptive]", func() {
var (
proxy = NewResource(oc.AsAdmin(), "proxy", "cluster")
certFileKey = "ca-bundle.crt"
cloudCertFileKey = "ca-bundle.pem"
userCABundleConfigMap = NewConfigMap(oc.AsAdmin(), "openshift-config", "user-ca-bundle")
cmName = "test-proxy-config"
cmNamespace = "openshift-config"
proxyConfigMap *ConfigMap
kubeCloudProviderConfigMap = GetCloudProviderConfigMap(oc.AsAdmin())
kubeCloudManagedConfigMap = NewConfigMap(oc.AsAdmin(), "openshift-config-managed", "kube-cloud-config")
kubeCertFile = "/etc/kubernetes/kubelet-ca.crt"
userCABundleCertFile = "/etc/pki/ca-trust/source/anchors/openshift-config-user-ca-bundle.crt"
kubeCloudCertFile = "/etc/kubernetes/static-pod-resources/configmaps/cloud-config/ca-bundle.pem"
ignitionConfig = "3.4.0"
node = mcp.GetSortedNodesOrFail()[0]
)
logger.Infof("Using pool %s for testing", mcp.GetName())
exutil.By("Getting initial status")
rfUserCA := NewRemoteFile(node, userCABundleCertFile)
o.Expect(rfUserCA.Fetch()).To(o.Succeed(), "Error getting the initial user CA bundle content")
initialUserCAContent := rfUserCA.GetTextContent()
rfCloudCA := NewRemoteFile(node, kubeCloudCertFile)
o.Expect(rfCloudCA.Fetch()).To(o.Succeed(), "Error getting the initial cloud CA bundle content")
initialCloudCAContent := rfCloudCA.GetTextContent()
defer func() {
wMcp.waitForComplete()
mMcp.waitForComplete()
exutil.By("Checking that the user CA bundle file content was properly restored when the configuration was removed")
o.Eventually(rfUserCA.Read, "5m", "20s").Should(exutil.Secure(HaveContent(initialUserCAContent)),
"The user CA bundle file content was not restored after the configuration was removed")
logger.Infof("OK!\n")
exutil.By("Checking that the cloud CA bundle file content was properly restored when the configuration was removed")
o.Eventually(rfCloudCA.Read, "5m", "20s").Should(exutil.Secure(HaveContent(initialCloudCAContent)),
"The cloud CA bundle file content was not restored after the configuration was removed")
logger.Infof("OK!\n")
}()
logger.Infof("OK!\n")
// Create a new config map and configure the proxy additional trusted CA if necessary
proxyConfigMapName := proxy.GetOrFail(`{.spec.trustedCA.name}`)
if proxyConfigMapName == "" {
var err error
exutil.By("Configure the proxy with an additional trusted CA")
logger.Infof("Create a configmap with the CA")
proxyConfigMap, err = CreateConfigMapWithRandomCert(oc.AsAdmin(), cmNamespace, cmName, certFileKey)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error creating a configmap with a CA")
defer proxyConfigMap.Delete()
logger.Infof("Patch the proxy resource to use the new configmap")
initProxySpec := proxy.GetOrFail(`{.spec}`)
defer func() {
logger.Infof("Restore original proxy spec: %s", initProxySpec)
_ = proxy.Patch("json", `[{ "op": "add", "path": "/spec", "value": `+initProxySpec+`}]`)
}()
proxy.Patch("merge", fmt.Sprintf(`{"spec": {"trustedCA": {"name": "%s"}}}`, cmName))
// TODO: remove this when the userCA bundle is handled using a controller and not a MC. It will be implemented in the near future
mcp.waitForComplete()
logger.Infof("OK!\n")
} else {
logger.Infof("The proxy is already configured to use the CA inside this configmap: %s", proxyConfigMapName)
proxyConfigMap = NewConfigMap(oc.AsAdmin(), "openshift-config", proxyConfigMapName)
}
exutil.By(fmt.Sprintf(`Check that the "%s" is in the ignition config`, kubeCertFile))
jsonPath := fmt.Sprintf(`storage.files.#(path=="%s")`, kubeCertFile)
o.Eventually(mcp.GetMCSIgnitionConfig,
"1m", "20s").WithArguments(true, ignitionConfig).ShouldNot(
HavePathWithValue(jsonPath, o.BeEmpty()),
"The file %s is not served in the ignition config", kubeCertFile)
logger.Infof("OK!\n")
exutil.By(fmt.Sprintf(`Check that the "%s" is in the ignition config`, userCABundleCertFile))
logger.Infof("Check that the file is served in the ignition config")
jsonPath = fmt.Sprintf(`storage.files.#(path=="%s")`, userCABundleCertFile)
o.Eventually(mcp.GetMCSIgnitionConfig,
"1m", "20s").WithArguments(true, ignitionConfig).ShouldNot(
HavePathWithValue(jsonPath, o.BeEmpty()),
"The file %s is not served in the ignition config", userCABundleCertFile)
logger.Infof("Check that the file has the right content in the nodes")
certContent := ""
if userCABundleConfigMap.Exists() {
userCABundleCert, exists, err := userCABundleConfigMap.HasKey(certFileKey)
o.Expect(err).NotTo(o.HaveOccurred(), "Error checking if %s contains key '%s'", userCABundleConfigMap, certFileKey)
if exists {
certContent = userCABundleCert
}
} else {
logger.Infof("%s does not exist. We don't take it into account", userCABundleConfigMap)
}
// OCPQE-17800 only merge the cert contents when trusted CA in proxy/cluster is not cm/user-ca-bundle
if proxyConfigMap.GetName() != userCABundleConfigMap.GetName() {
certContent += proxyConfigMap.GetDataValueOrFail(certFileKey)
}
EventuallyFileExistsInNode(userCABundleCertFile, certContent, node, "3m", "20s")
logger.Infof("OK!\n")
exutil.By(fmt.Sprintf(`Check that the "%s" is CA trusted. Command update-ca-trust was executed when the file was added`, userCABundleCertFile))
o.Eventually(BundleFileIsCATrusted, "5m", "20s").WithArguments(userCABundleCertFile, node).Should(
o.BeTrue(), "The %s file was not ca-trusted. It seems that the update-ca-trust command was not executed after updating the file", userCABundleCertFile)
logger.Infof("OK!\n")
exutil.By(fmt.Sprintf(`Check that the "%s" is in the ignition config`, kubeCloudCertFile))
kubeCloudCertContent, err := kubeCloudManagedConfigMap.GetDataValue("ca-bundle.pem")
if err != nil {
logger.Infof("No KubeCloud cert configured, configuring a new value")
if kubeCloudProviderConfigMap != nil && kubeCloudProviderConfigMap.Exists() {
_, caPath, err := createCA(createTmpDir(), cloudCertFileKey)
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating a new random certificate")
defer kubeCloudProviderConfigMap.RemoveDataKey(cloudCertFileKey)
kubeCloudProviderConfigMap.SetData("--from-file=" + cloudCertFileKey + "=" + caPath)
o.Eventually(kubeCloudManagedConfigMap.GetDataValueOrFail, "5m", "20s").WithArguments(cloudCertFileKey).ShouldNot(o.BeEmpty(),
"A new CA was added to %s but the managed resource %s was not populated", kubeCloudProviderConfigMap, kubeCloudManagedConfigMap)
kubeCloudCertContent = kubeCloudManagedConfigMap.GetDataValueOrFail(cloudCertFileKey)
} else {
logger.Infof("It is not possible to configure a new CloudCA. CloudProviderConfig configmap is not defined in the infrastructure resource or it does not exist")
kubeCloudCertContent = ""
}
}
if kubeCloudCertContent != "" {
logger.Infof("Check that the file is served in the ignition config")
jsonPath = fmt.Sprintf(`storage.files.#(path=="%s")`, kubeCloudCertFile)
o.Eventually(mcp.GetMCSIgnitionConfig,
"6m", "20s").WithArguments(true, ignitionConfig).ShouldNot(
HavePathWithValue(jsonPath, o.BeEmpty()),
"The file %s is not served in the ignition config", kubeCloudCertFile)
logger.Infof("Check that the file has the right content in the nodes")
EventuallyFileExistsInNode(kubeCloudCertFile, kubeCloudCertContent, node, "3m", "20s")
} else {
logger.Infof("No KubeCloud cert was configured and it was not possible to define a new one, we skip the cloudCA validation")
}
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
b5c0ffbf-ff09-40d4-a606-6f2af3abf5b2
|
Author:rioliu-NonHyperShiftHOST-NonPreRelease-Longduration-High-71991-post action of user-ca-bundle change will skip drain,reboot and restart crio service [Disruptive]
|
['"encoding/pem"', '"fmt"', '"os"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_security.go
|
g.It("Author:rioliu-NonHyperShiftHOST-NonPreRelease-Longduration-High-71991-post action of user-ca-bundle change will skip drain,reboot and restart crio service [Disruptive]", func() {
var (
mcName = "mco-tc-71991"
filePath = "/etc/pki/ca-trust/source/anchors/openshift-config-user-ca-bundle.crt"
mode = 420 // decimal 0644
objsignCABundlePemPath = "/etc/pki/ca-trust/extracted/pem/objsign-ca-bundle.pem"
node = mcp.GetSortedNodesOrFail()[0]
behaviourValidator = UpdateBehaviourValidator{
RebootNodesShouldBeSkipped: true,
DrainNodesShoulBeSkipped: true,
Checkers: []Checker{
NodeEventsChecker{
EventsSequence: []string{"Reboot", "Drain"},
EventsAreNotTriggered: true,
},
},
}
)
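// The validator asserts that applying this MC neither drains nor reboots the nodes, and that no Drain/Reboot events are emitted for them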
behaviourValidator.Initialize(mcp, nil)
exutil.By("Removing all MCD pods to clean the logs")
o.Expect(RemoveAllMCDPods(oc)).To(o.Succeed(), "Error removing all MCD pods in %s namespace", MachineConfigNamespace)
logger.Infof("OK!\n")
exutil.By("Create a new certificate")
_, caPath, err := createCA(createTmpDir(), "newcert.pem")
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating a new random certificate")
bcert, err := os.ReadFile(caPath)
o.Expect(err).NotTo(o.HaveOccurred(), "Error reading the new random certificate")
cert := string(bcert)
logger.Infof("OK!\n")
exutil.By("Create the MachineConfig with the new certificate")
file := ign32File{
Path: filePath,
Contents: ign32Contents{
Source: GetBase64EncodedFileSourceContent(cert),
},
Mode: PtrTo(mode),
}
mc := NewMachineConfig(oc.AsAdmin(), mcName, mcp.GetName())
mc.parameters = []string{fmt.Sprintf("FILES=[%s]", string(MarshalOrFail(file)))}
mc.skipWaitForMcp = true
defer mc.delete()
mc.create()
logger.Infof("OK!\n")
// Check that the MC is applied according to the expected behaviour
behaviourValidator.Validate()
exutil.By("Check that the certificate was created and updated in the cluster by using update-ca-trust command")
certRemote := NewRemoteFile(node, filePath)
objsignCABundleRemote := NewRemoteFile(node, objsignCABundlePemPath)
o.Eventually(certRemote, "5m", "20s").Should(Exist(),
"The file %s does not exist in the node %s after applying the configuration", certRemote.fullPath, node.GetName())
o.Eventually(certRemote, "5m", "20s").Should(exutil.Secure(HaveContent(o.ContainSubstring(cert))),
"%s doesn't have the expected content. It doesn't include the configured certificate", certRemote)
o.Eventually(objsignCABundleRemote, "5m", "20s").Should(Exist(),
"The file %s does not exist in the node %s after applying the configuration", certRemote.fullPath, node.GetName())
o.Expect(certRemote.Fetch()).To(o.Succeed(),
"There was an error trying to the the content of file %s in node %s", certRemote.fullPath, node.GetName())
// diff /etc/pki/ca-trust/extracted/pem/objsign-ca-bundle.pem /etc/pki/ca-trust/source/anchors/openshift-config-user-ca-bundle.crt | less
// The new certificate should be included in the /etc/pki/ca-trust/extracted/pem/objsign-ca-bundle.pem file when we execute the update-ca-trust command
o.Expect(objsignCABundleRemote.Read()).To(exutil.Secure(HaveContent(o.ContainSubstring(certRemote.GetTextContent()))),
"In node %s: The the content of the file %s should have been added to the file %s. Command 'update-ca-trust' was not executed by MCD",
node.GetName(), certRemote.fullPath, objsignCABundleRemote.fullPath)
logger.Infof("OK!\n")
exutil.By("Removing all MCD pods to clean the logs before the MC deletion")
o.Expect(RemoveAllMCDPods(oc)).To(o.Succeed(), "Error removing all MCD pods in %s namespace", MachineConfigNamespace)
logger.Infof("OK!\n")
exutil.By("Delete the MachineConfig")
behaviourValidator.Initialize(mcp, nil) // re-initialize the validator to ignore previous events
mc.deleteNoWait()
logger.Infof("OK!\n")
// Check that the MC is removed according to the expected behaviour
behaviourValidator.Validate()
exutil.By("Check that the openshift-config-user-ca-bundle.crt file does not include the certificate anymore and the nodes were updated with update-ca-trust")
// The file is not removed, it is always present but with empty content
o.Eventually(certRemote.Read, "5m", "20s").ShouldNot(exutil.Secure(HaveContent(o.ContainSubstring(cert))),
"The certificate has been removed, but %s still contains the certificate", certRemote.fullPath, node.GetName())
o.Eventually(objsignCABundleRemote, "5m", "20s").Should(Exist(),
"The file %s does not exist in the node %s but it should exist after removing the configuration", certRemote.fullPath, node.GetName())
o.Expect(objsignCABundleRemote.Read()).NotTo(exutil.Secure(HaveContent(o.ContainSubstring(cert))),
"In node %s: The the certificate should have been removed from the file %s. Command 'update-ca-trust' was not executed by MCD after removing the MC",
node.GetName(), certRemote.fullPath, objsignCABundleRemote.fullPath)
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
f12d6c06-34b4-412e-bd24-717b8a796d8a
|
Author:sregido-DEPRECATED-NonHyperShiftHOST-NonPreRelease-Critical-70857-[P1][OnCLayer] boostrap kubeconfig must be updated when kube-apiserver server CA is rotated [Disruptive]
|
['"strings"', '"time"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_security.go
|
g.It("Author:sregido-DEPRECATED-NonHyperShiftHOST-NonPreRelease-Critical-70857-[P1][OnCLayer] boostrap kubeconfig must be updated when kube-apiserver server CA is rotated [Disruptive]", func() {
var (
mco = NewResource(oc.AsAdmin(), "co", "machine-config")
kubernetesKubeconfigPath = "/etc/kubernetes/kubeconfig"
kubeletKubeconfigPath = "/var/lib/kubelet/kubeconfig"
lbServingSignerSecret = NewSecret(oc.AsAdmin(), "openshift-kube-apiserver-operator", "loadbalancer-serving-signer")
kubeAPIServerCM = NewConfigMap(oc.AsAdmin(), "openshift-config-managed", "kube-apiserver-server-ca")
node = mcp.GetSortedNodesOrFail()[0]
startTime = node.GetDateOrFail()
)
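// startTime (captured in the var block above) is used later to verify that kubelet.service was restarted after the certificate rotation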
// we are going to fail the test if there is any CO degraded, so we want to know the initial status of the COs
NewResourceList(oc.AsAdmin(), "co").PrintDebugCommand()
exutil.By("Rotate certificate in loadbalancer-serving-signer secret")
newCert := rotateTLSSecretOrFail(lbServingSignerSecret)
logger.Debugf("New TLS cert:\n%s", newCert)
logger.Infof("OK!\n")
exutil.By("Check that the kube-apiserver-serve-ca configmap contains the new TLS secret")
o.Eventually(kubeAPIServerCM.GetDataValue, "5m", "20s").WithArguments("ca-bundle.crt").Should(
exutil.Secure(o.ContainSubstring(newCert)),
"The new TLS certificate was not added to configmap %s", kubeAPIServerCM)
caBundle := strings.TrimSpace(kubeAPIServerCM.GetDataValueOrFail("ca-bundle.crt"))
logger.Debugf("New CA bundle:\n%s", caBundle)
logger.Infof("OK!\n")
exutil.By("Check kubernetes kubconfig file was correctly updated")
// Eventually the kubeconfig file should be updated with the new certificates stored in kube-apiserver-serve-ca
rfKubernetesKubecon := NewRemoteFile(node, kubernetesKubeconfigPath)
o.Eventually(func() (string, error) {
err := rfKubernetesKubecon.Fetch()
if err != nil {
return "", err
}
cert, err := getCertsFromKubeconfig(rfKubernetesKubecon.GetTextContent())
if err != nil {
return "", err
}
logger.Debugf("Kube cert:\n%s", cert)
return strings.TrimSpace(cert), nil
}, "5m", "10s").
Should(exutil.Secure(o.Equal(caBundle)),
"%s does not contain the certificates stored in %s.", kubernetesKubeconfigPath, kubeAPIServerCM)
o.Expect(rfKubernetesKubecon).To(o.And(
HaveOctalPermissions("0600"),
HaveOwner("root"),
HaveGroup("root")),
"Wrong security attributes in %s", rfKubernetesKubecon)
logger.Infof("OK!\n")
exutil.By("Check kubelet kubconfig file was correctly updated")
// Eventually the kubeconfig file should be updated with the new certificates stored in kube-apiserver-serve-ca
rfKubeletKubecon := NewRemoteFile(node, kubeletKubeconfigPath)
o.Eventually(func() (string, error) {
err := rfKubeletKubecon.Fetch()
if err != nil {
return "", err
}
cert, err := getCertsFromKubeconfig(rfKubeletKubecon.GetTextContent())
if err != nil {
return "", err
}
return strings.TrimSpace(cert), nil
}, "5m", "10s").
Should(exutil.Secure(o.Equal(caBundle)),
"%s does not contain the certificates stored in %s.", kubeletKubeconfigPath, kubeAPIServerCM)
o.Expect(rfKubeletKubecon).To(o.And(
HaveOctalPermissions("0600"),
HaveOwner("root"),
HaveGroup("root")),
"Wrong security attributes in %s", rfKubeletKubecon)
logger.Infof("OK!\n")
exutil.By("Check that kubelet was restarted")
o.Eventually(node.GetUnitActiveEnterTime, "6m", "20s").WithArguments("kubelet.service").Should(o.BeTemporally(">", startTime),
"Kubelet service was NOT restarted, but it should be")
logger.Infof("OK!\n")
exutil.By("Check that MCO pods are healthy")
o.Expect(waitForAllMCOPodsReady(oc.AsAdmin(), 10*time.Minute)).To(o.Succeed(),
"MCO pods are not Ready after cert rotation")
o.Eventually(mco, "5m", "20s").ShouldNot(BeDegraded(), "Error! %s is degraded:\n%s", mco, mco.PrettyString())
logger.Infof("OK!\n")
exutil.By("Check that all cluster operators are healthy")
checkAllOperatorsHealthy(oc.AsAdmin(), "20m", "30s")
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
931752c6-a69b-426c-98a5-1474320fd586
|
Author:ptalgulk-NonHyperShiftHOST-NonPreRelease-Longduration-Critical-75222-[P2] tlSecurityProfile switch and check the expected tlsMinVersion and cipheres suite are seen in MCS,MSS and rbac-kube-proxy pod logs[Disruptive]
|
['"crypto/tls"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_security.go
|
g.It("Author:ptalgulk-NonHyperShiftHOST-NonPreRelease-Longduration-Critical-75222-[P2] tlSecurityProfile switch and check the expected tlsMinVersion and cipheres suite are seen in MCS,MSS and rbac-kube-proxy pod logs[Disruptive]", func() {
var (
apiServer = NewResource(oc.AsAdmin(), "apiserver", "cluster")
)
exutil.By("Verify for Intermediate TLS Profile")
csNameList := getCipherSuitesNameforSpecificVersion(VersionTLS12)
var csVersion12 []string
for i := range csNameList {
if !strings.Contains(csNameList[i], "_CBC_") {
csVersion12 = append(csVersion12, csNameList[i])
}
}
validateCorrectTLSProfileSecurity(oc, "", "VersionTLS12", csVersion12)
logger.Infof("OK!\n")
defer func(initialConfig string) {
exutil.By("Restore with previous apiserver value")
apiServer.SetSpec(initialConfig)
exutil.By("Check that all cluster operators are stable")
o.Expect(WaitForStableCluster(oc.AsAdmin(), "30s", "50m")).To(o.Succeed(), "Not all COs were ready after configuring the tls profile")
logger.Infof("Wait for MCC to get the leader lease")
o.Eventually(NewController(oc.AsAdmin()).HasAcquiredLease, "6m", "20s").Should(o.BeTrue(),
"The controller pod didn't acquire the lease properly.")
mMcp.waitForComplete()
wMcp.waitForComplete()
logger.Infof("OK!\n")
}(apiServer.GetSpecOrFail())
exutil.By("Patch the Custom tlsSecurityProfile")
o.Expect(apiServer.Patch("json",
`[{ "op": "add", "path": "/spec/tlsSecurityProfile", "value": {"type": "Custom","custom": {"ciphers": ["ECDHE-ECDSA-CHACHA20-POLY1305","ECDHE-RSA-CHACHA20-POLY1305", "ECDHE-RSA-AES128-GCM-SHA256", "ECDHE-ECDSA-AES128-GCM-SHA256" ],"minTLSVersion": "VersionTLS11"}}}]`)).To(o.Succeed(), "Error patching tlsSecurityProfile")
logger.Infof("OK!\n")
exutil.By("Check that all cluster operators are stable")
o.Expect(WaitForStableCluster(oc.AsAdmin(), "30s", "50m")).To(o.Succeed(), "Not all COs were ready after configuring the tls profile")
logger.Infof("Wait for MCC to get the leader lease")
o.Eventually(NewController(oc.AsAdmin()).HasAcquiredLease, "12m", "20s").Should(o.BeTrue(),
"The controller pod didn't acquire the lease properly.")
mMcp.waitForComplete()
wMcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Verify for Custom TLS Profile")
customCipherSuite := []string{"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"}
validateCorrectTLSProfileSecurity(oc, "Custom", "VersionTLS11", customCipherSuite)
logger.Infof("OK!\n")
exutil.By("Patch the Old tlsSecurityProfile")
o.Expect(apiServer.Patch("json",
`[{ "op": "add", "path": "/spec/tlsSecurityProfile", "value": {"type": "Old","old": {}}}]`)).To(o.Succeed(), "Error patching http proxy")
logger.Infof("OK!\n")
exutil.By("Check that all cluster operators are stable")
o.Expect(WaitForStableCluster(oc.AsAdmin(), "30s", "50m")).To(o.Succeed(), "Not all COs were ready after configuring the tls profile")
logger.Infof("Wait for MCC to get the leader lease")
o.Eventually(NewController(oc.AsAdmin()).HasAcquiredLease, "12m", "20s").Should(o.BeTrue(),
"The controller pod didn't acquire the lease properly.")
mMcp.waitForComplete()
wMcp.waitForComplete()
exutil.By("Verify for Old TLS Profile")
csNameList = getCipherSuitesNameforSpecificVersion(VersionTLS10)
validateCorrectTLSProfileSecurity(oc, "Old", "VersionTLS10", csNameList)
logger.Infof("OK!\n")
// For now Modern Profile is not supported
match := `Unsupported value: "Modern"`
exutil.By("Patch the Modern tlsSecurityProfile")
tlsPatch := apiServer.Patch("json",
`[{ "op": "add", "path": "/spec/tlsSecurityProfile", "value": {"type": "Modern"}}]`)
o.Expect(tlsPatch.(*exutil.ExitError).StdErr).To(o.ContainSubstring(match))
})
| |||||
test case
|
openshift/openshift-tests-private
|
52768b9b-f459-440d-a505-77263af6e7c7
|
Author:ptalgulk-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-75543-tlsSecurity setting is also propagated on node in kubelet.conf [Disruptive]
|
['"crypto/tls"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_security.go
|
g.It("Author:ptalgulk-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-75543-tlsSecurity setting is also propagated on node in kubelet.conf [Disruptive]", func() {
var (
node = wMcp.GetSortedNodesOrFail()[0]
apiServer = NewResource(oc.AsAdmin(), "apiserver", "cluster")
kcName = "tc-75543-set-kubelet-custom-tls-profile"
kcTemplate = generateTemplateAbsolutePath("custom-tls-profile-kubelet-config.yaml")
customCipherSuite = []string{"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"}
)
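// Expected Intermediate-profile ciphers: all TLS 1.2 suites except the CBC-based ones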
csNameList := getCipherSuitesNameforSpecificVersion(VersionTLS12)
var csVersion12 []string
for _, name := range csNameList {
if !strings.Contains(name, "_CBC_") {
csVersion12 = append(csVersion12, name)
}
}
exutil.By("Verify for Intermediate TLS Profile in kubeletConfig")
validateCorrectTLSProfileSecurityInKubeletConfig(node, "VersionTLS12", csVersion12)
exutil.By("Verify for Intermediate TLS Profile pod logs")
validateCorrectTLSProfileSecurity(oc, "", "VersionTLS12", csVersion12)
defer func(initialConfig string) {
exutil.By("Restore with previous apiserver value")
apiServer.SetSpec(initialConfig)
exutil.By("Check that all cluster operators are stable")
o.Expect(WaitForStableCluster(oc.AsAdmin(), "30s", "50m")).To(o.Succeed(), "Not all COs were ready after configuring the tls profile")
logger.Infof("Wait for MCC to get the leader lease")
o.Eventually(NewController(oc.AsAdmin()).HasAcquiredLease, "6m", "20s").Should(o.BeTrue(),
"The controller pod didn't acquire the lease properly.")
mMcp.waitForComplete()
wMcp.waitForComplete()
logger.Infof("OK!\n")
}(apiServer.GetSpecOrFail())
exutil.By("Patch the Old tlsSecurityProfile")
o.Expect(apiServer.Patch("json",
`[{ "op": "add", "path": "/spec/tlsSecurityProfile", "value": {"type": "Old","old": {}}}]`)).To(o.Succeed(), "Error patching http proxy")
logger.Infof("OK!\n")
exutil.By("Check that all cluster operators are stable")
o.Expect(WaitForStableCluster(oc.AsAdmin(), "30s", "50m")).To(o.Succeed(), "Not all COs were ready after configuring the tls profile")
logger.Infof("Wait for MCC to get the leader lease")
o.Eventually(NewController(oc.AsAdmin()).HasAcquiredLease, "12m", "20s").Should(o.BeTrue(),
"The controller pod didn't acquire the lease properly.")
mMcp.waitForComplete()
wMcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Verify for Old TLS Profile in kubeletConfig")
csVersion10 := getCipherSuitesNameforSpecificVersion(VersionTLS10)
validateCorrectTLSProfileSecurityInKubeletConfig(node, "VersionTLS10", csVersion10)
exutil.By("Verify for Old TLS Profile in pod logs")
validateCorrectTLSProfileSecurity(oc, "Old", "VersionTLS10", csVersion10)
exutil.By("Create Kubeletconfig to configure a custom tlsSecurityProfile")
kc := NewKubeletConfig(oc.AsAdmin(), kcName, kcTemplate)
defer kc.Delete()
kc.create()
logger.Infof("KubeletConfig was created. Waiting for success.")
kc.waitUntilSuccess("5m")
logger.Infof("OK!\n")
exutil.By("Wait for Worker MachineConfigPool to be updated")
wMcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Verify for Custom TLS Profile in kubeletConfig")
validateCorrectTLSProfileSecurityInKubeletConfig(node, "VersionTLS11", customCipherSuite)
exutil.By("Patch the Intermediate tlsSecurityProfile to check kubeletconfig settings are not changed")
o.Expect(apiServer.Patch("json",
`[{ "op": "add", "path": "/spec/tlsSecurityProfile", "value": {"type": "Intermediate","intermediate": {}}}]`)).To(o.Succeed(), "Error patching http proxy")
logger.Infof("OK!\n")
exutil.By("Check that all cluster operators are stable")
o.Expect(WaitForStableCluster(oc.AsAdmin(), "30s", "50m")).To(o.Succeed(), "Not all COs were ready after configuring the tls profile")
logger.Infof("Wait for MCC to get the leader lease")
o.Eventually(NewController(oc.AsAdmin()).HasAcquiredLease, "12m", "20s").Should(o.BeTrue(),
"The controller pod didn't acquire the lease properly.")
mMcp.waitForComplete()
wMcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Verify for Intermediate TLS Profile pod logs")
validateCorrectTLSProfileSecurity(oc, "Intermediate", "VersionTLS12", csVersion12)
exutil.By("Verify for Custom TLS Profile not changed in kubeletConfig")
validateCorrectTLSProfileSecurityInKubeletConfig(node, "VersionTLS11", customCipherSuite)
exutil.By("Delete create kubeletConfig template")
kc.DeleteOrFail()
o.Expect(kc).NotTo(Exist())
wMcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("To check the kubeletConfig to have same tls setting as of API server")
validateCorrectTLSProfileSecurityInKubeletConfig(node, "VersionTLS12", csVersion12)
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
c9a5a2f3-af22-47c9-8fb8-825f8b56734b
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Critical-76587-[P1] MCS port should not expose weak ciphers to external client from master node IP [Disruptive]
|
['"fmt"', '"regexp"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_security.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Critical-76587-[P1] MCS port should not expose weak ciphers to external client from master node IP [Disruptive]", func() {
var (
node = mcp.GetSortedNodesOrFail()[0]
port = 22623
insecureCiphers = []string{"TLS_RSA_WITH_AES_128_GCM_SHA256", "TLS_RSA_WITH_AES_256_GCM_SHA384"}
)
exutil.By("Remove iptable rules")
logger.Infof("Remove the IPV4 iptables rules that block the ignition config")
removedRules, err := node.RemoveIPTablesRulesByRegexp(fmt.Sprintf("%d", port))
defer node.ExecIPTables(removedRules)
o.Expect(err).NotTo(o.HaveOccurred(), "Error removing the IPv4 iptables rules for port %s in node %s", port, node.GetName())
logger.Infof("Remove the IPV6 ip6tables rules that block the ignition config")
removed6Rules, err := node.RemoveIP6TablesRulesByRegexp(fmt.Sprintf("%d", port))
o.Expect(err).NotTo(o.HaveOccurred(), "Error removing the IPv6 iptables rules for port %s in node %s", port, node.GetName())
defer node.ExecIP6Tables(removed6Rules)
logger.Infof("OK!\n")
internalAPIServerURI, err := GetAPIServerInternalURI(mcp.oc)
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the internal apiserver URL")
exutil.By("Check that no weak cipher is exposed")
url := fmt.Sprintf("%s:%d", internalAPIServerURI, port)
cipherOutput, cipherErr := node.DebugNodeWithOptions([]string{"--image=" + TestSSLImage, "-n", MachineConfigNamespace}, "testssl.sh", "--color", "0", url)
logger.Infof("test ssh script output:\n %s", cipherOutput)
o.Expect(cipherErr).NotTo(o.HaveOccurred())
for _, insecureCipher := range insecureCiphers {
logger.Infof("Verify %s", insecureCipher)
o.Expect(cipherOutput).NotTo(o.ContainSubstring(insecureCipher),
"MCO is exposing weak ciphers in %s", internalAPIServerURI)
logger.Infof("OK")
}
logger.Infof("Verify SWEET32")
o.Expect(cipherOutput).To(o.MatchRegexp("SWEET32 .*"+regexp.QuoteMeta("not vulnerable (OK)")),
"%s is vulnerable to SWEET32", internalAPIServerURI)
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
faa4d640-3ac4-4830-bcfd-adddcd9f1437
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-Medium-75521-[P1] Log details for malformed certificates. No infinite loop [Disruptive]
|
['"crypto/x509"', '"encoding/pem"', '"fmt"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_security.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Medium-75521-[P1] Log details for malformed certificates. No infinite loop [Disruptive]", func() {
var (
configCM = NewConfigMap(oc.AsAdmin(), "openshift-config", "cloud-provider-config")
bundleKey = "ca-bundle.pem"
malformedCertFilePath = generateTemplateAbsolutePath("malformedcert.pem")
mcc = NewController(oc.AsAdmin())
expectedErrorMsg = "Malformed certificate 'CloudProviderCAData' detected and is not syncing. Error: x509: malformed certificate, Cert data: -----BEGIN CERTIFICATE---"
restoreFunc func() error
)
if !configCM.Exists() {
g.Skip(fmt.Sprintf("%s does not exist, we cannot recofigure it", configCM))
}
currentBundle, hasKey, err := configCM.HasKey(bundleKey)
o.Expect(err).NotTo(o.HaveOccurred(), "Error checking if key %s exists in %s", bundleKey, configCM)
if hasKey {
restoreFunc = func() error {
logger.Infof("Restoring initial data in %s", configCM)
configCM.oc.NotShowInfo()
return configCM.SetData(bundleKey + "=" + currentBundle)
}
} else {
restoreFunc = func() error {
return configCM.RemoveDataKey(bundleKey)
}
}
defer restoreFunc()
exutil.By("Configure a malformed certificate")
o.Expect(
configCM.SetData("--from-file="+bundleKey+"="+malformedCertFilePath),
).To(o.Succeed(), "Error configuring the %s value in %s", bundleKey, malformedCertFilePath)
logger.Infof("OK!\n")
exutil.By("Check that the error is correctly reported")
o.Eventually(mcc.GetLogs, "5m", "20s").Should(o.ContainSubstring(expectedErrorMsg),
"The malformed certificate is not correctly reported in the controller logs")
logger.Infof("OK!\n")
exutil.By("Restore the initial certificate values")
o.Expect(restoreFunc()).To(o.Succeed(),
"Error restoring the initial certificate values in %s", configCM)
logger.Infof("OK!\n")
exutil.By("Check that no more errors are reported")
currentLogs, err := mcc.GetLogs()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting MCC logs")
o.Eventually(func() (string, error) {
// we return the new recently printed logs only
var diffLogs string
newLogs, err := mcc.GetLogs()
if err != nil {
return "", err
}
diffLogs = strings.ReplaceAll(newLogs, currentLogs, "")
currentLogs = newLogs
logger.Infof("Checking diff logs: %s", diffLogs)
return diffLogs, nil
}, "5m", "20s").ShouldNot(o.ContainSubstring(expectedErrorMsg),
"The certificate was fixed but the controller is still reporting an error")
o.Consistently(func() (string, error) {
// we return the new recently printed logs only
var diffLogs string
newLogs, err := mcc.GetLogs()
if err != nil {
return "", err
}
diffLogs = strings.ReplaceAll(newLogs, currentLogs, "")
currentLogs = newLogs
logger.Infof("Checking diff logs: %s", diffLogs)
return diffLogs, nil
}, "1m", "20s").ShouldNot(o.ContainSubstring(expectedErrorMsg),
"The certificate was fixed but the controller is still reporting an error")
logger.Infof("OK!\n")
})
| |||||
test
|
openshift/openshift-tests-private
|
eb9f32b5-92d8-43d5-9cef-885e9013669d
|
mco_upgrade
|
import (
"fmt"
"os"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
)
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_upgrade.go
|
package mco
import (
"fmt"
"os"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
)
var _ = g.Describe("[sig-mco] MCO Upgrade", func() {
defer g.GinkgoRecover()
var (
// init cli object, temp namespace contains prefix mco.
// tip: don't put this in BeforeEach/JustBeforeEach, you will get the error
// "You may only call AfterEach from within a Describe, Context or When"
oc = exutil.NewCLI("mco-upgrade", exutil.KubeConfigPath())
// temp dir to store all test files, and it will be recycled when test is finished
tmpdir string
wMcp *MachineConfigPool
)
g.JustBeforeEach(func() {
tmpdir = createTmpDir()
preChecks(oc)
wMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
})
g.JustAfterEach(func() {
os.RemoveAll(tmpdir)
logger.Infof("test dir %s is cleaned up", tmpdir)
})
g.It("Author:rioliu-PstChkUpgrade-NonHyperShiftHOST-NonPreRelease-High-45550-[P1] upgrade cluster is failed on RHEL node", func() {
skipTestIfOsIsNotRhelOs(oc)
exutil.By("iterate all rhel nodes to check the machine config related annotations")
allRhelNodes := NewNodeList(oc).GetAllRhelWokerNodesOrFail()
for _, node := range allRhelNodes {
state := node.GetAnnotationOrFail(NodeAnnotationState)
reason := node.GetAnnotationOrFail(NodeAnnotationReason)
logger.Infof("checking node %s ...", node.GetName())
o.Expect(state).Should(o.Equal("Done"), fmt.Sprintf("annotation [%s] value is not expected: %s", NodeAnnotationState, state))
o.Expect(reason).ShouldNot(o.ContainSubstring(`Failed to find /dev/disk/by-label/root`),
fmt.Sprintf("annotation [%s] value has unexpected error message", NodeAnnotationReason))
}
})
g.It("Author:rioliu-PstChkUpgrade-NonHyperShiftHOST-NonPreRelease-High-55748-[P2] Upgrade failed with Transaction in progress", func() {
exutil.By("check machine config daemon log to verify no error `Transaction in progress` found")
allNodes, getNodesErr := NewNodeList(oc).GetAllLinux()
o.Expect(getNodesErr).NotTo(o.HaveOccurred(), "Get all linux nodes error")
for _, node := range allNodes {
logger.Infof("checking mcd log on %s", node.GetName())
errLog, getLogErr := node.GetMCDaemonLogs("'Transaction in progress: (null)'")
o.Expect(getLogErr).Should(o.HaveOccurred(), "Unexpected error found in MCD log")
o.Expect(errLog).Should(o.BeEmpty(), "Transaction in progress error found, it is unexpected")
logger.Infof("no error found")
}
})
g.It("Author:rioliu-PstChkUpgrade-NonHyperShiftHOST-NonPreRelease-High-59427-ssh keys can be migrated to new dir when node is upgraded from RHCOS8 to RHCOS9", func() {
var (
oldAuthorizedKeyPath = "/home/core/.ssh/authorized_key"
newAuthorizedKeyPath = "/home/core/.ssh/authorized_keys.d/ignition"
)
allCoreOsNodes := NewNodeList(oc).GetAllCoreOsNodesOrFail()
for _, node := range allCoreOsNodes {
// Some tests are intermittently leaking a "NoExecute" taint in the nodes. When it happens this test case fails because the "debug" pod cannot run in nodes with this taint
// In order to avoid this instability we make sure that we only check nodes where the "debug" pod can run
if node.HasTaintEffectOrFail("NoExecute") {
logger.Infof("Node %s is tainted with 'NoExecute'. Validation skipped.", node.GetName())
continue
}
if node.GetConditionStatusByType("DiskPressure") != FalseString {
logger.Infof("Node %s is under disk pressure. The node cannot be debugged. We skip the validation for this node", node.GetName())
continue
}
exutil.By(fmt.Sprintf("check authorized key dir and file on %s", node.GetName()))
o.Eventually(func(gm o.Gomega) {
output, err := node.DebugNodeWithChroot("stat", oldAuthorizedKeyPath)
gm.Expect(err).Should(o.HaveOccurred(), "old authorized key file still exists")
gm.Expect(output).Should(o.ContainSubstring("No such file or directory"))
}, "3m", "20s",
).Should(o.Succeed(),
"The old authorized key file still exists")
output, err := node.DebugNodeWithChroot("stat", newAuthorizedKeyPath)
o.Expect(err).ShouldNot(o.HaveOccurred(), "new authorized key file not found")
o.Expect(output).Should(o.ContainSubstring("File: " + newAuthorizedKeyPath))
}
})
g.It("Author:sregidor-PreChkUpgrade-NonHyperShiftHOST-NonPreRelease-High-62154-[P1] Don't render new MC until base MCs update [Disruptive]", func() {
var (
kcName = "mco-tc-62154-kubeletconfig"
kcTemplate = generateTemplateAbsolutePath("generic-kubelet-config.yaml")
crName = "mco-tc-62154-crconfig"
crTemplate = generateTemplateAbsolutePath("generic-container-runtime-config.yaml")
kubeletConfig = `{"podsPerCore": 100}`
crConfig = `{"pidsLimit": 2048}`
)
if len(wMcp.GetNodesOrFail()) == 0 {
g.Skip("Worker pool has 0 nodes configured.")
}
// For debugging purposes
oc.AsAdmin().WithoutNamespace().Run("get").Args("kubeletconfig,containerruntimeconfig").Execute()
exutil.By("create kubelet config to add max 100 pods per core")
kc := NewKubeletConfig(oc.AsAdmin(), kcName, kcTemplate)
kc.create("-p", "KUBELETCONFIG="+kubeletConfig)
exutil.By("create ContainerRuntimeConfig")
cr := NewContainerRuntimeConfig(oc.AsAdmin(), crName, crTemplate)
cr.create("-p", "CRCONFIG="+crConfig)
exutil.By("wait for worker pool to be ready")
wMcp.waitForComplete()
})
g.It("Author:sregidor-PstChkUpgrade-NonHyperShiftHOST-NonPreRelease-High-62154-[P2] Don't render new MC until base MCs update [Disruptive]", func() {
var (
kcName = "mco-tc-62154-kubeletconfig"
kcTemplate = generateTemplateAbsolutePath("generic-kubelet-config.yaml")
crName = "mco-tc-62154-crconfig"
crTemplate = generateTemplateAbsolutePath("generic-container-runtime-config.yaml")
)
// Skip if worker pool has no nodes
if len(wMcp.GetNodesOrFail()) == 0 {
g.Skip("Worker pool has 0 nodes configured.")
}
// For debugging purposes
oc.AsAdmin().WithoutNamespace().Run("get").Args("kubeletconfig,containerruntimeconfig").Execute()
// Skip if the precheck part of the test was not executed
kc := NewKubeletConfig(oc.AsAdmin(), kcName, kcTemplate)
if !kc.Exists() {
g.Skip(fmt.Sprintf(`The PreChkUpgrade part of the test should have created a KubeletConfig resource "%s". This resource does not exist in the cluster. Maybe we are upgrading from an old branch like 4.5?`, kc.GetName()))
}
defer wMcp.waitForComplete()
defer kc.Delete()
cr := NewContainerRuntimeConfig(oc.AsAdmin(), crName, crTemplate)
if !cr.Exists() {
g.Skip(fmt.Sprintf(`The PreChkUpgrade part of the test should have created a ContainerRuntimeConfig resource "%s". This resource does not exist in the cluster. Maybe we are upgrading from an old branch like 4.5?`, cr.GetName()))
}
defer cr.Delete()
logger.Infof("Jira issure: https://issues.redhat.com/browse/OCPBUGS-6018")
logger.Infof("PR: https://github.com/openshift/machine-config-operator/pull/3501")
exutil.By("check controller versions")
rmc, err := wMcp.GetConfiguredMachineConfig()
o.Expect(err).NotTo(o.HaveOccurred(),
"Cannot get the MC configured for worker pool")
// We don't check that the kubelet configuration and the container runtime configuration have the values that we configured
// because other preCheck test cases can override them. What we need to check is that the rendered MCs generated by our resources
// are generated by the right controller version
// Regarding the collision with other test cases we can have a look at https://issues.redhat.com/browse/OCPQE-19001
// The test cases we are colliding with are: OCP-45351 and OCP-45436 from NODE team
logger.Infof("Get controller version in rendered MC %s", rmc.GetName())
rmcCV := rmc.GetOrFail(`{.metadata.annotations.machineconfiguration\.openshift\.io/generated-by-controller-version}`)
logger.Infof("rendered MC controller version %s", rmcCV)
kblmc := NewMachineConfig(oc.AsAdmin(), kc.GetGeneratedMCNameOrFail(), MachineConfigPoolWorker)
logger.Infof("Get controller version in KubeletConfig generated MC %s", kblmc.GetName())
kblmcCV := kblmc.GetOrFail(`{.metadata.annotations.machineconfiguration\.openshift\.io/generated-by-controller-version}`)
logger.Infof("KubeletConfig generated MC controller version %s", kblmcCV)
crcmc := NewMachineConfig(oc.AsAdmin(), cr.GetGeneratedMCNameOrFail(), MachineConfigPoolWorker)
logger.Infof("Get controller version in ContainerRuntimeConfig generated MC %s", crcmc.GetName())
crcmcCV := crcmc.GetOrFail(`{.metadata.annotations.machineconfiguration\.openshift\.io/generated-by-controller-version}`)
logger.Infof("ContainerRuntimeConfig generated MC controller version %s", crcmcCV)
o.Expect(kblmcCV).To(o.Equal(rmcCV),
"KubeletConfig generated MC and worker pool rendered MC should have the same Controller Version annotation")
o.Expect(crcmcCV).To(o.Equal(rmcCV),
"ContainerRuntimeConfig generated MC and worker pool rendered MC should have the same Controller Version annotation")
})
g.It("Author:sregidor-PstChkUpgrade-NonHyperShiftHOST-NonPreRelease-Critical-64781-[P2] MAchine-Config-Operator should be compliant with CIS benchmark rule", func() {
exutil.By("Verify that machine-config-opeartor pod is not using the default SA")
o.Expect(
oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", MachineConfigNamespace, "-l", "k8s-app=machine-config-operator",
"-o", `jsonpath={.items[0].spec.serviceAccountName}`).Output(),
).NotTo(o.Equal("default"),
"machine-config-operator pod is using the 'default' serviceAccountName and it should not")
logger.Infof("OK!\n")
exutil.By("Verify that there is no clusterrolebinding for the default ServiceAccount")
defaultSAClusterRoleBinding := NewResource(oc.AsAdmin(), "clusterrolebinding", "default-account-openshift-machine-config-operator")
o.Expect(defaultSAClusterRoleBinding).NotTo(Exist(),
"The old clusterrolebinding for the 'default' service account exists and it should not exist")
logger.Infof("OK!\n")
})
g.It("NonHyperShiftHOST-ARO-Author:rioliu-PstChkUpgrade-NonPreRelease-Critical-70577-Run ovs-configuration.service before dnsmasq.service on Azure", func() {
skipTestIfSupportedPlatformNotMatched(oc, AzurePlatform)
var (
ovsconfigSvcName = "ovs-configuration.service"
dnsmasqSvcName = "dnsmasq.service"
masterNode = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster).GetCoreOsNodesOrFail()[0] // to be compatible with SNO/Compact clusters, get a CoreOS node from the master pool
)
exutil.By("Check service is enabled for ovs-configuration.service")
o.Expect(masterNode.IsUnitEnabled(ovsconfigSvcName)).Should(o.BeTrue(), "service %s is not enabled", ovsconfigSvcName)
exutil.By("Check service dependencies of ovs-configuration.service")
o.Expect(masterNode.GetUnitProperties(ovsconfigSvcName)).Should(o.MatchRegexp(fmt.Sprintf(`Before=.*%s.*`, dnsmasqSvcName)), "Cannot find dependent service definition dnsmasq for ovs-configuration")
o.Expect(masterNode.GetUnitDependencies(ovsconfigSvcName, "--before")).Should(o.ContainSubstring(dnsmasqSvcName), "Cannot find dependent service dnsmasq for ovs-configuration")
exutil.By("Check service state of dnsmasq")
isActive := masterNode.IsUnitActive(dnsmasqSvcName)
if IsAROCluster(oc) {
o.Expect(isActive).Should(o.BeTrue(), "on ARO cluster service %s is not active", dnsmasqSvcName)
} else {
o.Expect(isActive).Should(o.BeFalse(), "on normal Azure cluster service %s should be inactive", dnsmasqSvcName)
}
})
g.It("Author:sregidor-NonHyperShiftHOST-PreChkUpgrade-NonPreRelease-High-70813-[P1] ManagedBootImages update boot image of machineset [Serial]", func() {
// Bootimages Update functionality is only available in GCP(GA) and AWS(GA)
skipTestIfSupportedPlatformNotMatched(oc, GCPPlatform, AWSPlatform)
skipTestIfWorkersCannotBeScaled(oc.AsAdmin())
SkipIfNoFeatureGate(oc.AsAdmin(), "ManagedBootImages")
SkipIfNoFeatureGate(oc.AsAdmin(), "ManagedBootImagesAWS")
var (
tmpNamespace = NewResource(oc.AsAdmin(), "ns", "tc-70813-tmp-namespace")
tmpConfigMap = NewConfigMap(oc.AsAdmin(), tmpNamespace.GetName(), "tc-70813-tmp-configmap")
clonedMSName = "cloned-tc-70813-label"
labelName = "mcotest"
labelValue = "update"
machineSet = NewMachineSetList(oc.AsAdmin(), MachineAPINamespace).GetAllOrFail()[0]
machineConfiguration = GetMachineConfiguration(oc.AsAdmin())
allMachineSets = NewMachineSetList(oc.AsAdmin(), MachineAPINamespace).GetAllOrFail()
)
exutil.By("Persist information in a configmap in a tmp namespace")
if !tmpNamespace.Exists() {
logger.Infof("Creating namespace %s", tmpNamespace.GetName())
err := oc.AsAdmin().WithoutNamespace().Run("new-project").Args(tmpNamespace.GetName(), "--skip-config-write").Execute()
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating the temporary namespace %s", tmpNamespace.GetName())
}
if !tmpConfigMap.Exists() {
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-n", tmpConfigMap.GetNamespace(), "configmap", tmpConfigMap.GetName()).Execute()
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating the temporary configmap %s", tmpConfigMap.GetName())
}
for _, ms := range allMachineSets {
logger.Infof("Store bootimage of machineset %s in tmp configmap", ms.GetName())
o.Expect(
tmpConfigMap.SetData(ms.GetName()+"="+ms.GetCoreOsBootImageOrFail()),
).To(o.Succeed(), "Error storing %s data in temporary configmap", ms.GetName())
}
logger.Infof("OK!\n")
exutil.By("Opt-in boot images update")
o.Expect(
machineConfiguration.SetPartialManagedBootImagesConfig(labelName, labelValue),
).To(o.Succeed(), "Error configuring Partial managedBootImages in the 'cluster' MachineConfiguration resource")
logger.Infof("OK!\n")
exutil.By("Clone the first machineset twice")
clonedMS, err := machineSet.Duplicate(clonedMSName)
o.Expect(err).NotTo(o.HaveOccurred(), "Error duplicating %s", machineSet)
logger.Infof("Successfully created %s machineset", clonedMS.GetName())
logger.Infof("OK!\n")
exutil.By("Label the cloned machineset so that it is updated by MCO")
o.Expect(clonedMS.AddLabel(labelName, labelValue)).To(o.Succeed(),
"Error labeling %s", clonedMS)
logger.Infof("OK!\n")
})
g.It("Author:sregidor-NonHyperShiftHOST-PstChkUpgrade-NonPreRelease-High-70813-[P2] ManagedBootImages update boot image of machineset [Serial]", func() {
skipTestIfWorkersCannotBeScaled(oc.AsAdmin())
// Bootimages Update functionality is only available in GCP(GA) and AWS(GA)
skipTestIfSupportedPlatformNotMatched(oc, GCPPlatform, AWSPlatform)
SkipIfNoFeatureGate(oc.AsAdmin(), "ManagedBootImages")
SkipIfNoFeatureGate(oc.AsAdmin(), "ManagedBootImagesAWS")
var (
tmpNamespace = NewResource(oc.AsAdmin(), "ns", "tc-70813-tmp-namespace")
tmpConfigMap = NewConfigMap(oc.AsAdmin(), tmpNamespace.GetName(), "tc-70813-tmp-configmap")
clonedMSLabelName = "cloned-tc-70813-label"
clonedMS = NewMachineSet(oc.AsAdmin(), MachineAPINamespace, clonedMSLabelName)
allMachineSets = NewMachineSetList(oc.AsAdmin(), MachineAPINamespace).GetAllOrFail()
coreosBootimagesCM = NewConfigMap(oc.AsAdmin(), MachineConfigNamespace, "coreos-bootimages")
currentVersion = NewResource(oc.AsAdmin(), "ClusterVersion", "version").GetOrFail(`{.status.desired.version}`)
)
if !clonedMS.Exists() {
g.Skip("PreChkUpgrad part of this test case was skipped, so we skip the PstChkUpgrade part too")
}
defer clonedMS.Delete()
o.Expect(tmpConfigMap).To(Exist(), "The configmap with the pre-upgrade information was not found")
exutil.By("Check that the MCO boot images ConfigMap was updated")
o.Eventually(coreosBootimagesCM.Get, "5m", "20s").WithArguments(`{.data.MCOReleaseImageVersion}`).Should(o.Equal(currentVersion),
"The MCO boot images configmap doesn't have the right version after the upgrade")
logger.Infof("OK!\n")
exutil.By("Check that the right machinesets were updated with the right bootimage and user-data secret")
for _, ms := range allMachineSets {
logger.Infof("Checking machineset %s", ms.GetName())
if ms.GetName() == clonedMS.GetName() {
currentCoreOsBootImage := getCoreOsBootImageFromConfigMapOrFail(exutil.CheckPlatform(oc), getCurrentRegionOrFail(oc), *ms.GetArchitectureOrFail(), coreosBootimagesCM)
logger.Infof("Current coreOsBootImage: %s", currentCoreOsBootImage)
logger.Infof("Machineset %s should be updated", ms.GetName())
o.Eventually(ms.GetCoreOsBootImage, "5m", "20s").Should(o.ContainSubstring(currentCoreOsBootImage),
"%s was NOT updated to use the right boot image", ms)
o.Eventually(ms.GetUserDataSecret, "1m", "20s").Should(o.Equal("worker-user-data-managed"),
"%s was NOT updated to use the right boot image", ms)
} else {
// We check that the machineset has the same boot image that we stored before the upgrade started
logger.Infof("Machineset %s should NOT be updated", ms.GetName())
oldCoreOsBootImage, err := tmpConfigMap.GetDataValue(ms.GetName())
if err != nil {
logger.Warnf("Not checking boot image for machineset %s. No data found in the temporary configmap. %s", ms.GetName(), tmpConfigMap.PrettyString())
continue // We don't want to fail the test case. The new machineset could have been added by another test case and we don't want to collide with it
}
logger.Infof("Old coreOsBootImage: %s", oldCoreOsBootImage)
o.Expect(ms.GetCoreOsBootImage()).To(o.Equal(oldCoreOsBootImage),
"%s was updated, but it should not be updated", ms)
}
logger.Infof("OK!\n")
}
exutil.By("Check that the updated machineset can be scaled without problems")
defer wMcp.waitForComplete()
defer clonedMS.ScaleTo(0)
o.Expect(clonedMS.ScaleTo(1)).To(o.Succeed(),
"Error scaling up MachineSet %s", clonedMS.GetName())
logger.Infof("Waiting %s machineset for being ready", clonedMS)
o.Eventually(clonedMS.GetIsReady, "20m", "2m").Should(o.BeTrue(), "MachineSet %s is not ready", clonedMS.GetName())
logger.Infof("OK!\n")
})
g.It("Author:sregidor-NonHyperShiftHOST-PstChkUpgrade-NonPreRelease-Critical-76216-Scale up nodes after upgrade [Disruptive]", func() {
skipTestIfWorkersCannotBeScaled(oc.AsAdmin())
var (
machineSet = NewMachineSetList(oc.AsAdmin(), MachineAPINamespace).GetAllOrFail()[0]
clonedMSName = "cloned-tc-76216-scaleup"
)
exutil.By("Clone the first machineset")
clonedMS, err := machineSet.Duplicate(clonedMSName)
defer clonedMS.Delete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error duplicating %s", machineSet)
logger.Infof("Successfully created %s machineset", clonedMS.GetName())
logger.Infof("OK!\n")
exutil.By("Check that the updated machineset can be scaled without problems")
defer wMcp.waitForComplete()
defer clonedMS.ScaleTo(0)
o.Expect(clonedMS.ScaleTo(1)).To(o.Succeed(),
"Error scaling up MachineSet %s", clonedMS.GetName())
logger.Infof("Waiting %s machineset for being ready", clonedMS)
o.Eventually(clonedMS.GetIsReady, "20m", "2m").Should(o.BeTrue(), "MachineSet %s is not ready", clonedMS.GetName())
logger.Infof("OK!\n")
})
})
|
package mco
| ||||
test case
|
openshift/openshift-tests-private
|
71c3d135-353b-4104-88cc-1f8c997a63e5
|
Author:rioliu-PstChkUpgrade-NonHyperShiftHOST-NonPreRelease-High-45550-[P1] upgrade cluster is failed on RHEL node
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_upgrade.go
|
g.It("Author:rioliu-PstChkUpgrade-NonHyperShiftHOST-NonPreRelease-High-45550-[P1] upgrade cluster is failed on RHEL node", func() {
skipTestIfOsIsNotRhelOs(oc)
exutil.By("iterate all rhel nodes to check the machine config related annotations")
allRhelNodes := NewNodeList(oc).GetAllRhelWokerNodesOrFail()
for _, node := range allRhelNodes {
state := node.GetAnnotationOrFail(NodeAnnotationState)
reason := node.GetAnnotationOrFail(NodeAnnotationReason)
logger.Infof("checking node %s ...", node.GetName())
o.Expect(state).Should(o.Equal("Done"), fmt.Sprintf("annotation [%s] value is not expected: %s", NodeAnnotationState, state))
o.Expect(reason).ShouldNot(o.ContainSubstring(`Failed to find /dev/disk/by-label/root`),
fmt.Sprintf("annotation [%s] value has unexpected error message", NodeAnnotationReason))
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
95360ab3-8947-48ff-bcb8-99bcc8f3a6b2
|
Author:rioliu-PstChkUpgrade-NonHyperShiftHOST-NonPreRelease-High-55748-[P2] Upgrade failed with Transaction in progress
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_upgrade.go
|
g.It("Author:rioliu-PstChkUpgrade-NonHyperShiftHOST-NonPreRelease-High-55748-[P2] Upgrade failed with Transaction in progress", func() {
exutil.By("check machine config daemon log to verify no error `Transaction in progress` found")
allNodes, getNodesErr := NewNodeList(oc).GetAllLinux()
o.Expect(getNodesErr).NotTo(o.HaveOccurred(), "Get all linux nodes error")
for _, node := range allNodes {
logger.Infof("checking mcd log on %s", node.GetName())
errLog, getLogErr := node.GetMCDaemonLogs("'Transaction in progress: (null)'")
o.Expect(getLogErr).Should(o.HaveOccurred(), "Unexpected error found in MCD log")
o.Expect(errLog).Should(o.BeEmpty(), "Transaction in progress error found, it is unexpected")
logger.Infof("no error found")
}
})
| ||||||
test case
|
openshift/openshift-tests-private
|
2ebebf6d-f1aa-412d-a640-6cc293175d31
|
Author:rioliu-PstChkUpgrade-NonHyperShiftHOST-NonPreRelease-High-59427-ssh keys can be migrated to new dir when node is upgraded from RHCOS8 to RHCOS9
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_upgrade.go
|
g.It("Author:rioliu-PstChkUpgrade-NonHyperShiftHOST-NonPreRelease-High-59427-ssh keys can be migrated to new dir when node is upgraded from RHCOS8 to RHCOS9", func() {
var (
oldAuthorizedKeyPath = "/home/core/.ssh/authorized_key"
newAuthorizedKeyPath = "/home/core/.ssh/authorized_keys.d/ignition"
)
allCoreOsNodes := NewNodeList(oc).GetAllCoreOsNodesOrFail()
for _, node := range allCoreOsNodes {
// Some tests are intermittently leaking a "NoExecute" taint in the nodes. When it happens this test case fails because the "debug" pod cannot run in nodes with this taint
// In order to avoid this instability we make sure that we only check nodes where the "debug" pod can run
if node.HasTaintEffectOrFail("NoExecute") {
logger.Infof("Node %s is tainted with 'NoExecute'. Validation skipped.", node.GetName())
continue
}
if node.GetConditionStatusByType("DiskPressure") != FalseString {
logger.Infof("Node %s is under disk pressure. The node cannot be debugged. We skip the validation for this node", node.GetName())
continue
}
exutil.By(fmt.Sprintf("check authorized key dir and file on %s", node.GetName()))
o.Eventually(func(gm o.Gomega) {
output, err := node.DebugNodeWithChroot("stat", oldAuthorizedKeyPath)
gm.Expect(err).Should(o.HaveOccurred(), "old authorized key file still exists")
gm.Expect(output).Should(o.ContainSubstring("No such file or directory"))
}, "3m", "20s",
).Should(o.Succeed(),
"The old authorized key file still exists")
output, err := node.DebugNodeWithChroot("stat", newAuthorizedKeyPath)
o.Expect(err).ShouldNot(o.HaveOccurred(), "new authorized key file not found")
o.Expect(output).Should(o.ContainSubstring("File: " + newAuthorizedKeyPath))
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
2f63ad14-b61c-4791-b409-be6800f26173
|
Author:sregidor-PreChkUpgrade-NonHyperShiftHOST-NonPreRelease-High-62154-[P1] Don't render new MC until base MCs update [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_upgrade.go
|
g.It("Author:sregidor-PreChkUpgrade-NonHyperShiftHOST-NonPreRelease-High-62154-[P1] Don't render new MC until base MCs update [Disruptive]", func() {
var (
kcName = "mco-tc-62154-kubeletconfig"
kcTemplate = generateTemplateAbsolutePath("generic-kubelet-config.yaml")
crName = "mco-tc-62154-crconfig"
crTemplate = generateTemplateAbsolutePath("generic-container-runtime-config.yaml")
kubeletConfig = `{"podsPerCore": 100}`
crConfig = `{"pidsLimit": 2048}`
)
if len(wMcp.GetNodesOrFail()) == 0 {
g.Skip("Worker pool has 0 nodes configured.")
}
// For debugging purposes
oc.AsAdmin().WithoutNamespace().Run("get").Args("kubeletconfig,containerruntimeconfig").Execute()
exutil.By("create kubelet config to add max 100 pods per core")
kc := NewKubeletConfig(oc.AsAdmin(), kcName, kcTemplate)
kc.create("-p", "KUBELETCONFIG="+kubeletConfig)
exutil.By("create ContainerRuntimeConfig")
cr := NewContainerRuntimeConfig(oc.AsAdmin(), crName, crTemplate)
cr.create("-p", "CRCONFIG="+crConfig)
exutil.By("wait for worker pool to be ready")
wMcp.waitForComplete()
})
| ||||||
test case
|
openshift/openshift-tests-private
|
2be5bda8-d0ea-46a1-89c1-bef54b877cc6
|
Author:sregidor-PstChkUpgrade-NonHyperShiftHOST-NonPreRelease-High-62154-[P2] Don't render new MC until base MCs update [Disruptive]
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_upgrade.go
|
g.It("Author:sregidor-PstChkUpgrade-NonHyperShiftHOST-NonPreRelease-High-62154-[P2] Don't render new MC until base MCs update [Disruptive]", func() {
var (
kcName = "mco-tc-62154-kubeletconfig"
kcTemplate = generateTemplateAbsolutePath("generic-kubelet-config.yaml")
crName = "mco-tc-62154-crconfig"
crTemplate = generateTemplateAbsolutePath("generic-container-runtime-config.yaml")
)
// Skip if worker pool has no nodes
if len(wMcp.GetNodesOrFail()) == 0 {
g.Skip("Worker pool has 0 nodes configured.")
}
// For debugging purposes
oc.AsAdmin().WithoutNamespace().Run("get").Args("kubeletconfig,containerruntimeconfig").Execute()
// Skip if the precheck part of the test was not executed
kc := NewKubeletConfig(oc.AsAdmin(), kcName, kcTemplate)
if !kc.Exists() {
g.Skip(fmt.Sprintf(`The PreChkUpgrade part of the test should have created a KubeletConfig resource "%s". This resource does not exist in the cluster. Maybe we are upgrading from an old branch like 4.5?`, kc.GetName()))
}
defer wMcp.waitForComplete()
defer kc.Delete()
cr := NewContainerRuntimeConfig(oc.AsAdmin(), crName, crTemplate)
if !cr.Exists() {
g.Skip(fmt.Sprintf(`The PreChkUpgrade part of the test should have created a ContainerRuntimeConfig resource "%s". This resource does not exist in the cluster. Maybe we are upgrading from an old branch like 4.5?`, cr.GetName()))
}
defer cr.Delete()
logger.Infof("Jira issure: https://issues.redhat.com/browse/OCPBUGS-6018")
logger.Infof("PR: https://github.com/openshift/machine-config-operator/pull/3501")
exutil.By("check controller versions")
rmc, err := wMcp.GetConfiguredMachineConfig()
o.Expect(err).NotTo(o.HaveOccurred(),
"Cannot get the MC configured for worker pool")
// We don't check that the kubelet configuration and the container runtime configuration have the values that we configured
// because other preCheck test cases can override it. What we need to check is that the rendered MCs generated by our resources
// are generated by the right controller version
// Regarding the collision with other test cases we can have a look at https://issues.redhat.com/browse/OCPQE-19001
// The test cases we are colliding with are: OCP-45351 and OCP-45436 from NODE team
logger.Infof("Get controller version in rendered MC %s", rmc.GetName())
rmcCV := rmc.GetOrFail(`{.metadata.annotations.machineconfiguration\.openshift\.io/generated-by-controller-version}`)
logger.Infof("rendered MC controller version %s", rmcCV)
kblmc := NewMachineConfig(oc.AsAdmin(), kc.GetGeneratedMCNameOrFail(), MachineConfigPoolWorker)
logger.Infof("Get controller version in KubeletConfig generated MC %s", kblmc.GetName())
kblmcCV := kblmc.GetOrFail(`{.metadata.annotations.machineconfiguration\.openshift\.io/generated-by-controller-version}`)
logger.Infof("KubeletConfig generated MC controller version %s", kblmcCV)
crcmc := NewMachineConfig(oc.AsAdmin(), cr.GetGeneratedMCNameOrFail(), MachineConfigPoolWorker)
logger.Infof("Get controller version in ContainerRuntimeConfig generated MC %s", crcmc.GetName())
crcmcCV := crcmc.GetOrFail(`{.metadata.annotations.machineconfiguration\.openshift\.io/generated-by-controller-version}`)
logger.Infof("ContainerRuntimeConfig generated MC controller version %s", crcmcCV)
o.Expect(kblmcCV).To(o.Equal(rmcCV),
"KubeletConfig generated MC and worker pool rendered MC should have the same Controller Version annotation")
o.Expect(crcmcCV).To(o.Equal(rmcCV),
"ContainerRuntimeConfig generated MC and worker pool rendered MC should have the same Controller Version annotation")
})
| |||||
test case
|
openshift/openshift-tests-private
|
72c602c0-42c8-4cf8-85d7-9c8a14e1bdfc
|
Author:sregidor-PstChkUpgrade-NonHyperShiftHOST-NonPreRelease-Critical-64781-[P2] Machine-Config-Operator should be compliant with CIS benchmark rule
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_upgrade.go
|
g.It("Author:sregidor-PstChkUpgrade-NonHyperShiftHOST-NonPreRelease-Critical-64781-[P2] MAchine-Config-Operator should be compliant with CIS benchmark rule", func() {
exutil.By("Verify that machine-config-opeartor pod is not using the default SA")
o.Expect(
oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", MachineConfigNamespace, "-l", "k8s-app=machine-config-operator",
"-o", `jsonpath={.items[0].spec.serviceAccountName}`).Output(),
).NotTo(o.Equal("default"),
"machine-config-operator pod is using the 'default' serviceAccountName and it should not")
logger.Infof("OK!\n")
exutil.By("Verify that there is no clusterrolebinding for the default ServiceAccount")
defaultSAClusterRoleBinding := NewResource(oc.AsAdmin(), "clusterrolebinding", "default-account-openshift-machine-config-operator")
o.Expect(defaultSAClusterRoleBinding).NotTo(Exist(),
"The old clusterrolebinding for the 'default' service account exists and it should not exist")
logger.Infof("OK!\n")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
96219fab-b2e5-47f1-98b5-9966f16e117b
|
NonHyperShiftHOST-ARO-Author:rioliu-PstChkUpgrade-NonPreRelease-Critical-70577-Run ovs-configuration.service before dnsmasq.service on Azure
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_upgrade.go
|
g.It("NonHyperShiftHOST-ARO-Author:rioliu-PstChkUpgrade-NonPreRelease-Critical-70577-Run ovs-configuration.service before dnsmasq.service on Azure", func() {
skipTestIfSupportedPlatformNotMatched(oc, AzurePlatform)
var (
ovsconfigSvcName = "ovs-configuration.service"
dnsmasqSvcName = "dnsmasq.service"
masterNode = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster).GetCoreOsNodesOrFail()[0] // to be compatible with SNO/Compact clusters, get a coreOS node from the master pool
)
exutil.By("Check service is enabled for ovs-configuration.service")
o.Expect(masterNode.IsUnitEnabled(ovsconfigSvcName)).Should(o.BeTrue(), "service %s is not enabled", ovsconfigSvcName)
exutil.By("Check service dependencies of ovs-configuration.service")
o.Expect(masterNode.GetUnitProperties(ovsconfigSvcName)).Should(o.MatchRegexp(fmt.Sprintf(`Before=.*%s.*`, dnsmasqSvcName)), "Cannot find dependent service definition dnsmasq for ovs-configuration")
o.Expect(masterNode.GetUnitDependencies(ovsconfigSvcName, "--before")).Should(o.ContainSubstring(dnsmasqSvcName), "Cannot find dependent service dnsmasq for ovs-configuration")
exutil.By("Check service state of dnsmasq")
isActive := masterNode.IsUnitActive(dnsmasqSvcName)
if IsAROCluster(oc) {
o.Expect(isActive).Should(o.BeTrue(), "on ARO cluster service %s is not active", dnsmasqSvcName)
} else {
o.Expect(isActive).Should(o.BeFalse(), "on normal Azure cluster service %s should be inactive", dnsmasqSvcName)
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
0e732c76-057a-4516-ba86-e5320b01bff0
|
Author:sregidor-NonHyperShiftHOST-PreChkUpgrade-NonPreRelease-High-70813-[P1] ManagedBootImages update boot image of machineset [Serial]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_upgrade.go
|
g.It("Author:sregidor-NonHyperShiftHOST-PreChkUpgrade-NonPreRelease-High-70813-[P1] ManagedBootImages update boot image of machineset [Serial]", func() {
// Bootimages Update functionality is only available in GCP(GA) and AWS(GA)
skipTestIfSupportedPlatformNotMatched(oc, GCPPlatform, AWSPlatform)
skipTestIfWorkersCannotBeScaled(oc.AsAdmin())
SkipIfNoFeatureGate(oc.AsAdmin(), "ManagedBootImages")
SkipIfNoFeatureGate(oc.AsAdmin(), "ManagedBootImagesAWS")
var (
tmpNamespace = NewResource(oc.AsAdmin(), "ns", "tc-70813-tmp-namespace")
tmpConfigMap = NewConfigMap(oc.AsAdmin(), tmpNamespace.GetName(), "tc-70813-tmp-configmap")
clonedMSName = "cloned-tc-70813-label"
labelName = "mcotest"
labelValue = "update"
machineSet = NewMachineSetList(oc.AsAdmin(), MachineAPINamespace).GetAllOrFail()[0]
machineConfiguration = GetMachineConfiguration(oc.AsAdmin())
allMachineSets = NewMachineSetList(oc.AsAdmin(), MachineAPINamespace).GetAllOrFail()
)
exutil.By("Persist information in a configmap in a tmp namespace")
if !tmpNamespace.Exists() {
logger.Infof("Creating namespace %s", tmpNamespace.GetName())
err := oc.AsAdmin().WithoutNamespace().Run("new-project").Args(tmpNamespace.GetName(), "--skip-config-write").Execute()
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating the temporary namespace %s", tmpNamespace.GetName())
}
if !tmpConfigMap.Exists() {
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-n", tmpConfigMap.GetNamespace(), "configmap", tmpConfigMap.GetName()).Execute()
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating the temporary configmap %s", tmpConfigMap.GetName())
}
for _, ms := range allMachineSets {
logger.Infof("Store bootimage of machineset %s in tmp configmap", ms.GetName())
o.Expect(
tmpConfigMap.SetData(ms.GetName()+"="+ms.GetCoreOsBootImageOrFail()),
).To(o.Succeed(), "Error storing %s data in temporary configmap", ms.GetName())
}
logger.Infof("OK!\n")
exutil.By("Opt-in boot images update")
o.Expect(
machineConfiguration.SetPartialManagedBootImagesConfig(labelName, labelValue),
).To(o.Succeed(), "Error configuring Partial managedBootImages in the 'cluster' MachineConfiguration resource")
logger.Infof("OK!\n")
exutil.By("Clone the first machineset twice")
clonedMS, err := machineSet.Duplicate(clonedMSName)
o.Expect(err).NotTo(o.HaveOccurred(), "Error duplicating %s", machineSet)
logger.Infof("Successfully created %s machineset", clonedMS.GetName())
logger.Infof("OK!\n")
exutil.By("Label the cloned machineset so that it is updated by MCO")
o.Expect(clonedMS.AddLabel(labelName, labelValue)).To(o.Succeed(),
"Error labeling %s", clonedMS)
logger.Infof("OK!\n")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
af65e924-e287-4fbe-bc19-779010e14ee5
|
Author:sregidor-NonHyperShiftHOST-PstChkUpgrade-NonPreRelease-High-70813-[P2] ManagedBootImages update boot image of machineset [Serial]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_upgrade.go
|
g.It("Author:sregidor-NonHyperShiftHOST-PstChkUpgrade-NonPreRelease-High-70813-[P2] ManagedBootImages update boot image of machineset [Serial]", func() {
skipTestIfWorkersCannotBeScaled(oc.AsAdmin())
// Bootimages Update functionality is only available in GCP(GA) and AWS(GA)
skipTestIfSupportedPlatformNotMatched(oc, GCPPlatform, AWSPlatform)
SkipIfNoFeatureGate(oc.AsAdmin(), "ManagedBootImages")
SkipIfNoFeatureGate(oc.AsAdmin(), "ManagedBootImagesAWS")
var (
tmpNamespace = NewResource(oc.AsAdmin(), "ns", "tc-70813-tmp-namespace")
tmpConfigMap = NewConfigMap(oc.AsAdmin(), tmpNamespace.GetName(), "tc-70813-tmp-configmap")
clonedMSLabelName = "cloned-tc-70813-label"
clonedMS = NewMachineSet(oc.AsAdmin(), MachineAPINamespace, clonedMSLabelName)
allMachineSets = NewMachineSetList(oc.AsAdmin(), MachineAPINamespace).GetAllOrFail()
coreosBootimagesCM = NewConfigMap(oc.AsAdmin(), MachineConfigNamespace, "coreos-bootimages")
currentVersion = NewResource(oc.AsAdmin(), "ClusterVersion", "version").GetOrFail(`{.status.desired.version}`)
)
if !clonedMS.Exists() {
g.Skip("PreChkUpgrad part of this test case was skipped, so we skip the PstChkUpgrade part too")
}
defer clonedMS.Delete()
o.Expect(tmpConfigMap).To(Exist(), "The configmap with the pre-upgrade information was not found")
exutil.By("Check that the MCO boot images ConfigMap was updated")
o.Eventually(coreosBootimagesCM.Get, "5m", "20s").WithArguments(`{.data.MCOReleaseImageVersion}`).Should(o.Equal(currentVersion),
"The MCO boot images configmap doesn't have the right version after the upgrade")
logger.Infof("OK!\n")
exutil.By("Check that the right machinesets were updated with the right bootimage and user-data secret")
for _, ms := range allMachineSets {
logger.Infof("Checking machineset %s", ms.GetName())
if ms.GetName() == clonedMS.GetName() {
currentCoreOsBootImage := getCoreOsBootImageFromConfigMapOrFail(exutil.CheckPlatform(oc), getCurrentRegionOrFail(oc), *ms.GetArchitectureOrFail(), coreosBootimagesCM)
logger.Infof("Current coreOsBootImage: %s", currentCoreOsBootImage)
logger.Infof("Machineset %s should be updated", ms.GetName())
o.Eventually(ms.GetCoreOsBootImage, "5m", "20s").Should(o.ContainSubstring(currentCoreOsBootImage),
"%s was NOT updated to use the right boot image", ms)
o.Eventually(ms.GetUserDataSecret, "1m", "20s").Should(o.Equal("worker-user-data-managed"),
"%s was NOT updated to use the right boot image", ms)
} else {
// We check that the machineset has the same boot image that we stored before the upgrade started
logger.Infof("Machineset %s should NOT be updated", ms.GetName())
oldCoreOsBootImaget, err := tmpConfigMap.GetDataValue(ms.GetName())
if err != nil {
logger.Warnf("Not checking boot image for machineset %s. No data found in the temporary configmap. %s", ms.GetName(), tmpConfigMap.PrettyString())
continue // We don't want to fail the test case. The new machineset could have been added by any other test case and we don't want to collide with other test cases
}
logger.Infof("Old coreOsBootImage: %s", oldCoreOsBootImaget)
o.Expect(ms.GetCoreOsBootImage()).To(o.Equal(oldCoreOsBootImaget),
"%s was updated, but it should not be updated", ms)
}
logger.Infof("OK!\n")
}
exutil.By("Check that the updated machineset can be scaled without problems")
defer wMcp.waitForComplete()
defer clonedMS.ScaleTo(0)
o.Expect(clonedMS.ScaleTo(1)).To(o.Succeed(),
"Error scaling up MachineSet %s", clonedMS.GetName())
logger.Infof("Waiting %s machineset for being ready", clonedMS)
o.Eventually(clonedMS.GetIsReady, "20m", "2m").Should(o.BeTrue(), "MachineSet %s is not ready", clonedMS.GetName())
logger.Infof("OK!\n")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
de23bfcd-b377-46c9-a380-cdebe8987b8d
|
Author:sregidor-NonHyperShiftHOST-PstChkUpgrade-NonPreRelease-Critical-76216-Scale up nodes after upgrade [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_upgrade.go
|
g.It("Author:sregidor-NonHyperShiftHOST-PstChkUpgrade-NonPreRelease-Critical-76216-Scale up nodes after upgrade [Disruptive]", func() {
skipTestIfWorkersCannotBeScaled(oc.AsAdmin())
var (
machineSet = NewMachineSetList(oc.AsAdmin(), MachineAPINamespace).GetAllOrFail()[0]
clonedMSName = "cloned-tc-76216-scaleup"
)
exutil.By("Clone the first machineset")
clonedMS, err := machineSet.Duplicate(clonedMSName)
defer clonedMS.Delete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error duplicating %s", machineSet)
logger.Infof("Successfully created %s machineset", clonedMS.GetName())
logger.Infof("OK!\n")
exutil.By("Check that the updated machineset can be scaled without problems")
defer wMcp.waitForComplete()
defer clonedMS.ScaleTo(0)
o.Expect(clonedMS.ScaleTo(1)).To(o.Succeed(),
"Error scaling up MachineSet %s", clonedMS.GetName())
logger.Infof("Waiting %s machineset for being ready", clonedMS)
o.Eventually(clonedMS.GetIsReady, "20m", "2m").Should(o.BeTrue(), "MachineSet %s is not ready", clonedMS.GetName())
logger.Infof("OK!\n")
})
| ||||||
file
|
openshift/openshift-tests-private
|
05bd4d67-10ce-41c6-a668-bb8200820d4f
|
node
|
import (
"context"
"fmt"
"path"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
expect "github.com/google/goexpect"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/openshift/openshift-tests-private/test/extended/util/architecture"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
"k8s.io/apimachinery/pkg/util/wait"
)
|
github.com/openshift/openshift-tests-private/test/extended/mco/node.go
|
package mco
import (
"context"
"fmt"
"path"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
expect "github.com/google/goexpect"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/openshift/openshift-tests-private/test/extended/util/architecture"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
"k8s.io/apimachinery/pkg/util/wait"
)
// Node is used to handle node OCP resources
type Node struct {
Resource
eventCheckpoint time.Time
}
// NodeList handles list of nodes
type NodeList struct {
ResourceList
}
// SpaceUsage stores disk space usage information in bytes
type SpaceUsage struct {
Used int64
Avail int64
}
// NewNode constructs a new node struct
func NewNode(oc *exutil.CLI, name string) *Node {
return &Node{*NewResource(oc, "node", name), time.Time{}}
}
// NewNodeList constructs a new node list struct to handle all existing nodes
func NewNodeList(oc *exutil.CLI) *NodeList {
return &NodeList{*NewResourceList(oc, "node")}
}
// String implements the Stringer interface
func (n Node) String() string {
return n.GetName()
}
// DebugNodeWithChroot creates a debugging session of the node with chroot
func (n *Node) DebugNodeWithChroot(cmd ...string) (string, error) {
var (
out string
err error
numRetries = 3
)
n.oc.NotShowInfo()
defer n.oc.SetShowInfo()
for i := 0; i < numRetries; i++ {
if i > 0 {
logger.Infof("Error happened: %s.\nRetrying command. Num retries: %d", err, i)
}
out, err = exutil.DebugNodeWithChroot(n.oc, n.name, cmd...)
if err == nil {
return out, nil
}
}
return out, err
}
// DebugNodeWithChrootStd creates a debugging session of the node with chroot and only returns separated stdout and stderr
func (n *Node) DebugNodeWithChrootStd(cmd ...string) (string, string, error) {
var (
stdout string
stderr string
err error
numRetries = 3
)
setErr := quietSetNamespacePrivileged(n.oc, n.oc.Namespace())
if setErr != nil {
return "", "", setErr
}
cargs := []string{"node/" + n.GetName(), "--", "chroot", "/host"}
cargs = append(cargs, cmd...)
for i := 0; i < numRetries; i++ {
if i > 0 {
logger.Infof("Error happened: %s.\nRetrying command. Num retries: %d", err, i)
}
stdout, stderr, err = n.oc.Run("debug").Args(cargs...).Outputs()
if err == nil {
return stdout, stderr, nil
}
}
recErr := quietRecoverNamespaceRestricted(n.oc, n.oc.Namespace())
if recErr != nil {
return "", "", recErr
}
return stdout, stderr, err
}
// DebugNodeWithOptions launches a debug container with options, e.g. --image
func (n *Node) DebugNodeWithOptions(options []string, cmd ...string) (string, error) {
var (
out string
err error
numRetries = 3
)
for i := 0; i < numRetries; i++ {
if i > 0 {
logger.Infof("Error happened: %s.\nRetrying command. Num retries: %d", err, i)
}
out, err = exutil.DebugNodeWithOptions(n.oc, n.name, options, cmd...)
if err == nil {
return out, nil
}
}
return out, err
}
// DebugNode creates a debugging session of the node
func (n *Node) DebugNode(cmd ...string) (string, error) {
return exutil.DebugNode(n.oc, n.name, cmd...)
}
// DeleteLabel removes the given label from the node
func (n *Node) DeleteLabel(label string) (string, error) {
logger.Infof("Delete label %s from node %s", label, n.GetName())
return exutil.DeleteLabelFromNode(n.oc, n.name, label)
}
// WaitForLabelRemoved waits until the given label is not present in the node.
func (n *Node) WaitForLabelRemoved(label string) error {
logger.Infof("Waiting for label %s to be removed from node %s", label, n.GetName())
immediate := true
waitErr := wait.PollUntilContextTimeout(context.TODO(), 1*time.Minute, 10*time.Minute, immediate, func(_ context.Context) (bool, error) {
labels, err := n.Get(`{.metadata.labels}`)
if err != nil {
logger.Infof("Error waiting for labels to be removed:%v, and try next round", err)
return false, nil
}
labelsMap := JSON(labels)
label, err := labelsMap.GetSafe(label)
if err == nil && !label.Exists() {
logger.Infof("Label %s has been removed from node %s", label, n.GetName())
return true, nil
}
return false, nil
})
if waitErr != nil {
logger.Errorf("Timeout while waiting for label %s to be delete from node %s. Error: %s",
label,
n.GetName(),
waitErr)
}
return waitErr
}
// GetMachineConfigDaemon returns the name of the machine-config-daemon pod for this node
func (n *Node) GetMachineConfigDaemon() string {
machineConfigDaemon, err := exutil.GetPodName(n.oc, "openshift-machine-config-operator", "k8s-app=machine-config-daemon", n.name)
o.Expect(err).NotTo(o.HaveOccurred())
return machineConfigDaemon
}
// GetNodeHostname returns the cluster node hostname
func (n *Node) GetNodeHostname() (string, error) {
return exutil.GetNodeHostname(n.oc, n.name)
}
// ForceReapplyConfiguration create the file `/run/machine-config-daemon-force` in the node
// in order to force MCO to reapply the current configuration
func (n *Node) ForceReapplyConfiguration() error {
logger.Infof("Forcing reapply configuration in node %s", n.GetName())
_, err := n.DebugNodeWithChroot("touch", "/run/machine-config-daemon-force")
return err
}
// GetUnitStatus executes `systemctl status` command on the node and returns the output
func (n *Node) GetUnitStatus(unitName string) (string, error) {
return n.DebugNodeWithChroot("systemctl", "status", unitName)
}
// UnmaskService executes `systemctl unmask` command on the node and returns the output
func (n *Node) UnmaskService(svcName string) (string, error) {
return n.DebugNodeWithChroot("systemctl", "unmask", svcName)
}
// GetUnitProperties executes `systemctl show $unitname`, can be used to check service dependencies
func (n *Node) GetUnitProperties(unitName string, args ...string) (string, error) {
cmd := append([]string{"systemctl", "show", unitName}, args...)
stdout, _, err := n.DebugNodeWithChrootStd(cmd...)
return stdout, err
}
// GetUnitActiveEnterTime returns the last time when the unit entered the active state
func (n *Node) GetUnitActiveEnterTime(unitName string) (time.Time, error) {
cmdOut, err := n.GetUnitProperties(unitName, "--timestamp=unix", "-P", "ActiveEnterTimestamp")
if err != nil {
return time.Time{}, err
}
logger.Infof("Active enter time output: [%s]", cmdOut)
// The output should have this format
// sh-5.1# systemctl show crio.service --timestamp=unix -P ActiveEnterTimestamp
// @1709918801
r := regexp.MustCompile(`^\@(?P<unix_timestamp>[0-9]+)$`)
match := r.FindStringSubmatch(cmdOut)
if len(match) == 0 {
msg := fmt.Sprintf("Wrong property format. Expected a format like '@1709918801', but got '%s'", cmdOut)
logger.Infof(msg)
return time.Time{}, fmt.Errorf(msg)
}
unixTimeIndex := r.SubexpIndex("unix_timestamp")
unixTime := match[unixTimeIndex]
iUnixTime, err := strconv.ParseInt(unixTime, 10, 64)
if err != nil {
return time.Time{}, err
}
activeEnterTime := time.Unix(iUnixTime, 0)
logger.Infof("Unit %s ActiveEnterTimestamp %s", unitName, activeEnterTime)
return activeEnterTime, nil
}
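// Illustrative usage (a hypothetical sketch, not part of the original helpers): for the sample
// output shown above ("@1709918801"), the returned value would be time.Unix(1709918801, 0):
//
//	enterTime, err := node.GetUnitActiveEnterTime("crio.service")
//	if err == nil {
//		logger.Infof("crio.service became active at %s", enterTime)
//	}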
// GetUnitExecReloadStartTime returns the start time of the last ExecReload execution of the unit
// It parses output like: ExecReload={ path=/bin/kill ; argv[]=/bin/kill -s HUP $MAINPID ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }
// If the service was never reloaded, then we return an empty time.Time{} and no error.
func (n *Node) GetUnitExecReloadStartTime(unitName string) (time.Time, error) {
cmdOut, err := n.GetUnitProperties(unitName, "--timestamp=unix", "-P", "ExecReload")
if err != nil {
return time.Time{}, err
}
logger.Infof("Reload start time output: [%s]", cmdOut)
// The output should have this format
// sh-5.1# systemctl show crio.service --timestamp=unix -P ExecReload
// ExecReload={ path=/bin/kill ; argv[]=/bin/kill -s HUP $MAINPID ; ... ; start_time=[@1709918801] ; ... }
r := regexp.MustCompile(`start_time=\[(?P<unix_timestamp>@[0-9]+|n\/a)\]`)
match := r.FindStringSubmatch(cmdOut)
if len(match) == 0 {
msg := fmt.Sprintf("Wrong property format. Expected a format like 'start_time=[@1709918801]', but got '%s'", cmdOut)
logger.Infof(msg)
return time.Time{}, fmt.Errorf(msg)
}
unixTimeIndex := r.SubexpIndex("unix_timestamp")
unixTime := match[unixTimeIndex]
if unixTime == "n/a" {
logger.Infof("Crio was never reloaded. Reload Start Time = %s", unixTime)
return time.Time{}, nil
}
iUnixTime, err := strconv.ParseInt(strings.Replace(unixTime, "@", "", 1), 10, 64)
if err != nil {
return time.Time{}, err
}
activeEnterTime := time.Unix(iUnixTime, 0)
logger.Infof("Unit %s ExecReload start time %s", unitName, activeEnterTime)
return activeEnterTime, nil
}
// GetUnitDependencies executes `systemctl list-dependencies` with arguments like --before --after
func (n *Node) GetUnitDependencies(unitName string, opts ...string) (string, error) {
options := []string{"systemctl", "list-dependencies", unitName}
if len(opts) > 0 {
options = append(options, opts...)
}
return n.DebugNodeWithChroot(options...)
}
// IsUnitActive checks whether the unit is active or inactive
func (n *Node) IsUnitActive(unitName string) bool {
output, _, err := n.DebugNodeWithChrootStd("systemctl", "is-active", unitName)
if err != nil {
logger.Errorf("Get unit state for %s failed: %v", unitName, err)
return false
}
logger.Infof("Unit %s state is: %s", unitName, output)
return output == "active"
}
// IsUnitEnabled checks whether the unit enablement state is enabled/enabled-runtime or something else, e.g. disabled
func (n *Node) IsUnitEnabled(unitName string) bool {
output, _, err := n.DebugNodeWithChrootStd("systemctl", "is-enabled", unitName)
if err != nil {
logger.Errorf("Get unit enablement state for %s failed: %v", unitName, err)
return false
}
logger.Infof("Unit %s enablement state is: %s ", unitName, output)
return strings.HasPrefix(output, "enabled")
}
// GetRpmOstreeStatus returns the rpm-ostree status, in json format if asJSON is true
func (n Node) GetRpmOstreeStatus(asJSON bool) (string, error) {
args := []string{"rpm-ostree", "status"}
if asJSON {
args = append(args, "--json")
}
stringStatus, _, err := n.DebugNodeWithChrootStd(args...)
logger.Debugf("json rpm-ostree status:\n%s", stringStatus)
return stringStatus, err
}
// GetBootedOsTreeDeployment returns the ostree deployment currently booted, in json format if asJSON is true
func (n Node) GetBootedOsTreeDeployment(asJSON bool) (string, error) {
if asJSON {
stringStatus, err := n.GetRpmOstreeStatus(true)
if err != nil {
return "", err
}
deployments := JSON(stringStatus).Get("deployments")
for _, item := range deployments.Items() {
booted := item.Get("booted").ToBool()
if booted {
return item.AsJSONString()
}
}
} else {
stringStatus, err := n.GetRpmOstreeStatus(false)
if err != nil {
return "", err
}
deployments := strings.Split(stringStatus, "\n\n")
for _, deployment := range deployments {
if strings.Contains(deployment, "*") {
return deployment, nil
}
}
}
logger.Infof("WARNING! No booted deployment found in node %s", n.GetName())
return "", nil
}
// GetCurrentBootOSImage returns the osImage currently used to boot the node
func (n Node) GetCurrentBootOSImage() (string, error) {
deployment, err := n.GetBootedOsTreeDeployment(true)
if err != nil {
return "", fmt.Errorf("Error getting the rpm-ostree status value.\n%s", err)
}
containerRef, jerr := JSON(deployment).GetSafe("container-image-reference")
if jerr != nil {
return "", fmt.Errorf("We cant get 'container-image-reference' from the deployment status. Wrong rpm-ostree status!.\n%s\n%s", jerr, deployment)
}
logger.Infof("Current booted container-image-reference: %s", containerRef)
imageSplit := strings.Split(containerRef.ToString(), ":")
lenImageSplit := len(imageSplit)
if lenImageSplit < 2 {
return "", fmt.Errorf("Wrong container-image-reference in deployment:\n%s\n%s", err, deployment)
}
// remove the "ostree-unverified-registry:" part of the image
// remove the "containers-storage:" part of the image
// it can have these modifiers: ostree-unverified-image:containers-storage:quay.io/openshift-.....
// we need to take into account this kind of images too -> ostree-unverified-registry:image-registry.openshift-image-registry.svc:5000/openshift-machine-config-operator/ocb-worker-image@sha256:da29d9033c...
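// Illustrative mappings (hypothetical image references, not from the original source) of
// container-image-reference -> returned booted image, as handled by the code below:
//	ostree-unverified-image:containers-storage:quay.io/example/os-image:tag -> quay.io/example/os-image:tag
//	ostree-unverified-registry:registry.example.com:5000/ns/os-image@sha256:abc -> registry.example.com:5000/ns/os-image@sha256:abc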
image := imageSplit[lenImageSplit-2] + ":" + imageSplit[lenImageSplit-1]
// we need to check if the image includes the port too
if lenImageSplit > 2 {
_, err := strconv.Atoi(strings.Split(image, "/")[0])
// the image url includes the port. It is in the format my.domain:port/my/path
if err == nil {
image = imageSplit[lenImageSplit-3] + ":" + image
}
}
image = strings.TrimSpace(image)
logger.Infof("Booted image: %s", image)
return image, nil
}
// Cordon cordons the node by running the "oc adm cordon" command
func (n *Node) Cordon() error {
return n.oc.Run("adm").Args("cordon", n.GetName()).Execute()
}
// Uncordon uncordons the node by running the "oc adm uncordon" command
func (n *Node) Uncordon() error {
return n.oc.Run("adm").Args("uncordon", n.GetName()).Execute()
}
// IsCordoned returns true if the node is cordoned
func (n *Node) IsCordoned() (bool, error) {
key, err := n.Get(`{.spec.taints[?(@.key=="node.kubernetes.io/unschedulable")].key}`)
if err != nil {
return false, err
}
return key != "", nil
}
// IsCordonedOrFail returns true if the node is cordoned. It fails the test if there is any error
func (n *Node) IsCordonedOrFail() bool {
isCordoned, err := n.IsCordoned()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the tains in node %s", n.GetName())
return isCordoned
}
// RestoreDesiredConfig changes the value of the desiredConfig annotation to equal the value of currentConfig. desiredConfig=currentConfig.
func (n *Node) RestoreDesiredConfig() error {
currentConfig := n.GetCurrentMachineConfig()
if currentConfig == "" {
return fmt.Errorf("currentConfig annotation has an empty value in node %s", n.GetName())
}
logger.Infof("Node: %s. Restoring desiredConfig value to match currentConfig value: %s", n.GetName(), currentConfig)
currentImage := n.GetCurrentImage()
if currentImage == "" {
return n.PatchDesiredConfig(currentConfig)
}
logger.Infof("Node: %s. Restoring desiredImage value to match currentImage value: %s", n.GetName(), currentImage)
return n.PatchDesiredConfigAndDesiredImage(currentConfig, currentImage)
}
// GetCurrentMachineConfig returns the ID of the current machine config used in the node
func (n Node) GetCurrentMachineConfig() string {
return n.GetOrFail(`{.metadata.annotations.machineconfiguration\.openshift\.io/currentConfig}`)
}
// GetCurrentImage returns the current image used in this node
func (n Node) GetCurrentImage() string {
return n.GetOrFail(`{.metadata.annotations.machineconfiguration\.openshift\.io/currentImage}`)
}
// GetDesiredMachineConfig returns the ID of the machine config that we want the node to use
func (n Node) GetDesiredMachineConfig() string {
return n.GetOrFail(`{.metadata.annotations.machineconfiguration\.openshift\.io/desiredConfig}`)
}
// GetMachineConfigState returns the State of machineconfiguration process
func (n Node) GetMachineConfigState() string {
return n.GetOrFail(`{.metadata.annotations.machineconfiguration\.openshift\.io/state}`)
}
// GetMachineConfigReason returns the Reason of machineconfiguration on this node
func (n Node) GetMachineConfigReason() string {
return n.GetOrFail(`{.metadata.annotations.machineconfiguration\.openshift\.io/reason}`)
}
// GetDesiredConfig returns the desired machine config for this node
func (n Node) GetDesiredConfig() string {
return n.GetOrFail(`{.metadata.annotations.machineconfiguration\.openshift\.io/desiredConfig}`)
}
// PatchDesiredConfig patches the desiredConfig annotation with the provided value
func (n *Node) PatchDesiredConfig(desiredConfig string) error {
return n.Patch("merge", `{"metadata":{"annotations":{"machineconfiguration.openshift.io/desiredConfig":"`+desiredConfig+`"}}}`)
}
// PatchDesiredConfigAndDesiredImage patches the desiredConfig annotation and the desiredImage annotation with the provided values
func (n *Node) PatchDesiredConfigAndDesiredImage(desiredConfig, desiredImage string) error {
return n.Patch("merge", `{"metadata":{"annotations":{"machineconfiguration.openshift.io/desiredConfig":"`+desiredConfig+`", "machineconfiguration.openshift.io/desiredImage":"`+desiredImage+`"}}}`)
}
// GetDesiredDrain returns the last desired machine config that needed a drain operation in this node
func (n Node) GetDesiredDrain() string {
return n.GetOrFail(`{.metadata.annotations.machineconfiguration\.openshift\.io/desiredDrain}`)
}
// GetLastAppliedDrain returns the last applied drain in this node
func (n Node) GetLastAppliedDrain() string {
return n.GetOrFail(`{.metadata.annotations.machineconfiguration\.openshift\.io/lastAppliedDrain}`)
}
// HasBeenDrained returns true if the desired and the last applied drain annotations have the same value
func (n Node) HasBeenDrained() bool {
return n.GetLastAppliedDrain() == n.GetDesiredDrain()
}
// IsUpdated returns true if the node is up to date with its machine configuration (not pending a new one)
func (n *Node) IsUpdated() bool {
return (n.GetCurrentMachineConfig() == n.GetDesiredMachineConfig()) && (n.GetMachineConfigState() == "Done")
}
// IsTainted returns true if the node has taints
func (n *Node) IsTainted() bool {
taint, err := n.Get("{.spec.taints}")
return err == nil && taint != ""
}
// Returns true if the node is schedulable
func (n *Node) IsSchedulable() (bool, error) {
unschedulable, err := n.Get(`{.spec.unschedulable}`)
if err != nil {
return false, err
}
return !IsTrue(unschedulable), nil
}
// Returns true if the node is schedulable and fails the test if there is an error
func (n *Node) IsSchedulableOrFail() bool {
schedulable, err := n.IsSchedulable()
o.Expect(err).NotTo(o.HaveOccurred(), "Error while getting the taints in node %s", n.GetName())
return schedulable
}
// HasTaintEffect Returns true if the node has any taint with the given effect
func (n *Node) HasTaintEffect(taintEffect string) (bool, error) {
taint, err := n.Get(`{.spec.taints[?(@.effect=="` + taintEffect + `")]}`)
if err != nil {
return false, err
}
return taint != "", nil
}
// HasTaintEffectOrFail Returns true if the node has any taint with the given effect and fails the test if any error happened
func (n *Node) HasTaintEffectOrFail(taintEffect string) bool {
hasTaintEffect, err := n.HasTaintEffect(taintEffect)
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(), "Error while getting the taints effects in node %s", n.GetName())
return hasTaintEffect
}
// IsEdge returns true if the node is an edge node
func (n *Node) IsEdge() (bool, error) {
_, err := n.GetLabel(`node-role.kubernetes.io/edge`)
if err != nil {
if strings.Contains(err.Error(), "not found") {
return false, nil
}
return false, err
}
return true, nil
}
// IsEdgeOrFail returns true if the node is an edge node and fails the test if any error happens
func (n *Node) IsEdgeOrFail() bool {
isEdge, err := n.IsEdge()
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(), "Error finding out if node %s is an edge node", n)
return isEdge
}
// IsUpdating returns if the node is currently updating the machine configuration
func (n *Node) IsUpdating() bool {
return n.GetMachineConfigState() == "Working"
}
// IsReady returns boolean 'true' if the node is ready. Else it returns 'false'.
func (n Node) IsReady() bool {
return n.IsConditionStatusTrue("Ready")
}
// GetMCDaemonLogs returns the logs of the MachineConfig daemonset pod for this node. The logs will be grepped using the 'filter' parameter
func (n Node) GetMCDaemonLogs(filter string) (string, error) {
var (
mcdLogs = ""
err error
)
err = Retry(5, 5*time.Second, func() error {
mcdLogs, err = exutil.GetSpecificPodLogs(n.oc, MachineConfigNamespace, "machine-config-daemon", n.GetMachineConfigDaemon(), filter)
return err
})
return mcdLogs, err
}
// PollMCDaemonLogs returns a function that can be used by gomega Eventually/Consistently functions to poll logs results
// If there is an error, it will return an empty string; we need to take that into account when building our Eventually/Consistently statement
func (n Node) PollMCDaemonLogs(filter string) func() string {
return func() string {
logs, err := n.GetMCDaemonLogs(filter)
if err != nil {
return ""
}
return logs
}
}
// CaptureMCDaemonLogsUntilRestartWithTimeout captures all the logs in the MachineConfig daemon pod for this node until the daemon pod is restarted
func (n *Node) CaptureMCDaemonLogsUntilRestartWithTimeout(timeout string) (string, error) {
var (
logs = ""
err error
machineConfigDaemon = n.GetMachineConfigDaemon()
)
duration, err := time.ParseDuration(timeout)
if err != nil {
return "", err
}
c := make(chan string, 1)
go func() {
err = Retry(5, 5*time.Second, func() error {
var err error
logs, err = n.oc.WithoutNamespace().Run("logs").Args("-n", MachineConfigNamespace, machineConfigDaemon, "-c", "machine-config-daemon", "-f").Output()
if err != nil {
logger.Errorf("Retrying because: Error getting %s logs. Error: %s\nOutput: %s", machineConfigDaemon, err, logs)
}
return err
})
if err != nil {
logger.Errorf("Error getting %s logs. Error: %s", machineConfigDaemon, err)
}
c <- logs
}()
select {
case logs := <-c:
return logs, nil
case <-time.After(duration):
errMsg := fmt.Sprintf(`Node "%s". Timeout while waiting for the daemon pod "%s" -n "%s" to be restarted`,
n.GetName(), machineConfigDaemon, MachineConfigNamespace)
logger.Infof(errMsg)
return "", fmt.Errorf(errMsg)
}
}
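// Illustrative usage (hypothetical timeout value, not part of the original helpers): capture the
// daemon pod logs until the pod is restarted, giving up after 15 minutes:
//
//	logs, err := node.CaptureMCDaemonLogsUntilRestartWithTimeout("15m")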
// GetDateOrFail executes the `date` command and returns the current time in the node and fails the test case if there is any error
func (n Node) GetDateOrFail() time.Time {
date, err := n.GetDate()
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(), "Could not get the current date in %s", n)
return date
}
// GetDate executes the `date` command and returns the current time in the node
func (n Node) GetDate() (time.Time, error) {
date, _, err := n.DebugNodeWithChrootStd(`date`, `+%Y-%m-%dT%H:%M:%SZ`)
logger.Infof("node %s. DATE: %s", n.GetName(), date)
if err != nil {
logger.Errorf("Error trying to get date in node %s: %s", n.GetName(), err)
return time.Time{}, err
}
layout := "2006-01-02T15:04:05Z"
returnTime, perr := time.Parse(layout, date)
if perr != nil {
logger.Errorf("Error trying to parsing date %s in node %s: %s", date, n.GetName(), perr)
return time.Time{}, perr
}
return returnTime, nil
}
// GetUptime executes `uptime -s` command and returns the time when the node was booted
func (n Node) GetUptime() (time.Time, error) {
uptime, _, err := n.DebugNodeWithChrootStd(`uptime`, `-s`)
logger.Infof("node %s. UPTIME: %s", n.GetName(), uptime)
if err != nil {
logger.Errorf("Error trying to get uptime in node %s: %s", n.GetName(), err)
return time.Time{}, err
}
layout := "2006-01-02 15:04:05"
returnTime, perr := time.Parse(layout, uptime)
if perr != nil {
logger.Errorf("Error trying to parsing uptime %s in node %s: %s", uptime, n.GetName(), perr)
return time.Time{}, perr
}
return returnTime, nil
}
// GetEventsByReasonSince returns a list of all the events with the given reason that are related to this node since the provided date
func (n Node) GetEventsByReasonSince(since time.Time, reason string) ([]Event, error) {
eventList := NewEventList(n.oc, MachineConfigNamespace)
eventList.ByFieldSelector(`reason=` + reason + `,involvedObject.name=` + n.GetName())
return eventList.GetAllSince(since)
}
// GetAllEventsSince returns a list of all the events related to this node since the provided date
func (n Node) GetAllEventsSince(since time.Time) ([]Event, error) {
eventList := NewEventList(n.oc, MachineConfigNamespace)
eventList.ByFieldSelector(`involvedObject.name=` + n.GetName())
return eventList.GetAllSince(since)
}
// GetAllEventsSinceEvent returns a list of all the events related to this node that occurred after the provided event
func (n Node) GetAllEventsSinceEvent(since *Event) ([]Event, error) {
eventList := NewEventList(n.oc, MachineConfigNamespace)
eventList.ByFieldSelector(`involvedObject.name=` + n.GetName())
return eventList.GetAllEventsSinceEvent(since)
}
// GetLatestEvent returns the latest event occurred in the node
func (n Node) GetLatestEvent() (*Event, error) {
eventList := NewEventList(n.oc, MachineConfigNamespace)
eventList.ByFieldSelector(`involvedObject.name=` + n.GetName())
return eventList.GetLatest()
}
// GetEvents returns all the events that happened in this node since the IgnoreEventsBeforeNow() method was called.
//
// If IgnoreEventsBeforeNow() is not called, it returns all existing events for this node.
func (n *Node) GetEvents() ([]Event, error) {
return n.GetAllEventsSince(n.eventCheckpoint)
}
func (n *Node) IgnoreEventsBeforeNow() error {
var err error
latestEvent, lerr := n.GetLatestEvent()
if lerr != nil {
return lerr
}
logger.Infof("Latest event in node %s was: %s", n.GetName(), latestEvent)
if latestEvent == nil {
logger.Infof("Since no event was found for node %s, we will not ignore any event", n.GetName())
n.eventCheckpoint = time.Time{}
return nil
}
logger.Infof("Ignoring all previous events!")
n.eventCheckpoint, err = latestEvent.GetLastTimestamp()
return err
}
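// Illustrative checkpoint pattern (a hypothetical sketch, not part of the original helpers):
// ignore the existing event history first, perform the operation under test, and then inspect
// only the events generated afterwards:
//
//	_ = node.IgnoreEventsBeforeNow()
//	// ... trigger the behaviour under test ...
//	newEvents, _ := node.GetEvents()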
// GetDateWithDelta returns the date in the node +delta
func (n Node) GetDateWithDelta(delta string) (time.Time, error) {
date, err := n.GetDate()
if err != nil {
return time.Time{}, err
}
timeDuration, terr := time.ParseDuration(delta)
if terr != nil {
logger.Errorf("Error getting delta time %s", terr)
return time.Time{}, terr
}
return date.Add(timeDuration), nil
}
// IsFIPSEnabled checks whether fips is enabled on the node
func (n *Node) IsFIPSEnabled() (bool, error) {
output, err := n.DebugNodeWithChroot("fips-mode-setup", "--check")
if err != nil {
logger.Errorf("Error checking fips mode %s", err)
}
return strings.Contains(output, "FIPS mode is enabled"), err
}
// IsKernelArgEnabled checks whether the given kernel arg is enabled on the node
func (n *Node) IsKernelArgEnabled(karg string) (bool, error) {
unameOut, unameErr := n.DebugNodeWithChroot("bash", "-c", "uname -a")
if unameErr != nil {
logger.Errorf("Error checking kernel arg via uname -a: %v", unameErr)
return false, unameErr
}
cliOut, cliErr := n.DebugNodeWithChroot("cat", "/proc/cmdline")
if cliErr != nil {
logger.Errorf("Err checking kernel arg via /proc/cmdline: %v", cliErr)
return false, cliErr
}
return (strings.Contains(unameOut, karg) || strings.Contains(cliOut, karg)), nil
}
// IsRealTimeKernel returns true if the node is using a realtime kernel
func (n *Node) IsRealTimeKernel() (bool, error) {
// we can use the IsKernelArgEnabled to check the realtime kernel
return n.IsKernelArgEnabled("PREEMPT_RT")
}
// Is64kPagesKernel returns true if the node is using a 64k-pages kernel
func (n *Node) Is64kPagesKernel() (bool, error) {
// we can use the IsKernelArgEnabled to check the 64k-pages kernel
return n.IsKernelArgEnabled("+64k")
}
// InstallRpm installs the rpm in the node using rpm-ostree command
func (n *Node) InstallRpm(rpmName string) (string, error) {
logger.Infof("Installing rpm '%s' in node %s", rpmName, n.GetName())
out, err := n.DebugNodeWithChroot("rpm-ostree", "install", rpmName)
return out, err
}
// UninstallRpm uninstalls the rpm in the node using rpm-ostree command
func (n *Node) UninstallRpm(rpmName string) (string, error) {
logger.Infof("Uninstalling rpm '%s' in node %s", rpmName, n.GetName())
out, err := n.DebugNodeWithChroot("rpm-ostree", "uninstall", rpmName)
return out, err
}
// Reboot schedules a reboot of the node after a short delay. To know why, look at https://issues.redhat.com/browse/OCPBUGS-1306
func (n *Node) Reboot() error {
afterSeconds := 1
logger.Infof("REBOOTING NODE %s after %d seconds!!", n.GetName(), afterSeconds)
// In SNO we cannot trigger the reboot directly using a debug command (like: "sleep 10 && reboot"), because the debug pod will return a failure
// because we lose connectivity with the cluster when we reboot the only node in the cluster
// The solution is to schedule a reboot 1 second after the Reboot method is called, and then wait that same delay to make sure that the reboot timer has been armed before returning
out, err := n.DebugNodeWithChroot("sh", "-c", fmt.Sprintf("systemd-run --on-active=%d --timer-property=AccuracySec=10ms reboot", afterSeconds))
if err != nil {
logger.Errorf("Error rebooting node %s:\n%s", n, out)
}
time.Sleep(time.Duration(afterSeconds) * time.Second) // we don't return the control of the program until we make sure that the timer for the reboot has expired
return err
}
// IsRpmOsTreeIdle returns true if `rpm-ostree status` reports idle state
func (n *Node) IsRpmOsTreeIdle() (bool, error) {
status, err := n.GetRpmOstreeStatus(false)
if strings.Contains(status, "State: idle") {
return true, err
}
return false, err
}
// WaitUntilRpmOsTreeIsIdle waits until rpm-ostree reports an idle state. Returns an error if it times out
func (n *Node) WaitUntilRpmOsTreeIsIdle() error {
logger.Infof("Waiting for rpm-ostree state to be idle in node %s", n.GetName())
immediate := false
waitErr := wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 10*time.Minute, immediate, func(_ context.Context) (bool, error) {
isIdle, err := n.IsRpmOsTreeIdle()
if err == nil {
if isIdle {
return true, nil
}
return false, nil
}
logger.Infof("Error waiting for rpm-ostree status to report idle state: %s.\nTry next round", err)
return false, nil
})
if waitErr != nil {
logger.Errorf("Timeout while waiting for rpm-ostree status to report idle state in node %s. Error: %s",
n.GetName(),
waitErr)
}
return waitErr
}
// CancelRpmOsTreeTransactions cancels rpm-ostree transactions
func (n *Node) CancelRpmOsTreeTransactions() (string, error) {
return n.DebugNodeWithChroot("rpm-ostree", "cancel")
}
// CopyFromLocal copies a local file or directory to the node
func (n *Node) CopyFromLocal(from, to string) error {
immediate := true
waitErr := wait.PollUntilContextTimeout(context.TODO(), 1*time.Minute, 5*time.Minute, immediate, func(_ context.Context) (bool, error) {
kubeletReady := n.IsReady()
if kubeletReady {
return true, nil
}
logger.Warnf("Kubelet is not ready in %s. To copy the file to the node we need to wait for kubelet to be ready. Waiting...", n)
return false, nil
})
if waitErr != nil {
logger.Errorf("Cannot copy file %s to %s in node %s because Kubelet is not ready in this node", from, to, n)
return waitErr
}
return n.oc.Run("adm").Args("copy-to-node", "node/"+n.GetName(), fmt.Sprintf("--copy=%s=%s", from, to)).Execute()
}
// CopyToLocal copies a file or directory in the node to a local path
func (n *Node) CopyToLocal(from, to string) error {
logger.Infof("Node: %s. Copying file %s to local path %s",
n.GetName(), from, to)
mcDaemonName := n.GetMachineConfigDaemon()
fromDaemon := filepath.Join("/rootfs", from)
return n.oc.Run("cp").Args("-n", MachineConfigNamespace, mcDaemonName+":"+fromDaemon, to, "-c", MachineConfigDaemon).Execute()
}
// RemoveFile removes a file from the node
func (n *Node) RemoveFile(filePathToRemove string) error {
logger.Infof("Removing file %s from node %s", filePathToRemove, n.GetName())
output, err := n.DebugNodeWithChroot("rm", "-f", filePathToRemove)
logger.Infof(output)
return err
}
// RpmIsInstalled returns true if the package is installed
func (n *Node) RpmIsInstalled(rpmNames ...string) bool {
rpmOutput, err := n.DebugNodeWithChroot(append([]string{"rpm", "-q"}, rpmNames...)...)
logger.Infof(rpmOutput)
return err == nil
}
// ExecuteDebugExpectBatch runs a debug command and executes an interactive batch sequence using expect
func (n *Node) ExecuteDebugExpectBatch(timeout time.Duration, batch []expect.Batcher) ([]expect.BatchRes, error) {
setErr := quietSetNamespacePrivileged(n.oc, n.oc.Namespace())
if setErr != nil {
return nil, setErr
}
debugCommand := fmt.Sprintf("oc --kubeconfig=%s -n %s debug node/%s",
exutil.KubeConfigPath(), n.oc.Namespace(), n.GetName())
logger.Infof("Expect spawning command: %s", debugCommand)
e, _, err := expect.Spawn(debugCommand, -1, expect.Verbose(true))
defer func() { _ = e.Close() }()
if err != nil {
logger.Errorf("Error spawning process %s. Error: %s", debugCommand, err)
return nil, err
}
bresps, err := e.ExpectBatch(batch, timeout)
if err != nil {
logger.Errorf("Error executing batch: %s", err)
}
recErr := quietRecoverNamespaceRestricted(n.oc, n.oc.Namespace())
if recErr != nil {
return nil, recErr
}
return bresps, err
}
// UserAdd creates a user in the node
func (n *Node) UserAdd(userName string) error {
logger.Infof("Create user %s in node %s", userName, n.GetName())
_, err := n.DebugNodeWithChroot("useradd", userName)
return err
}
// UserDel deletes a user in the node
func (n *Node) UserDel(userName string) error {
logger.Infof("Delete user %s in node %s", userName, n.GetName())
_, err := n.DebugNodeWithChroot("userdel", "-f", userName)
return err
}
// UserExists returns true if the user exists in the node
func (n *Node) UserExists(userName string) bool {
_, err := n.DebugNodeWithChroot("grep", "-E", fmt.Sprintf("^%s:", userName), "/etc/shadow")
return err == nil
}
// GetRHELVersion returns the RHEL version of the node
func (n *Node) GetRHELVersion() (string, error) {
vContent, err := n.DebugNodeWithChroot("cat", "/etc/os-release")
if err != nil {
return "", err
}
r := regexp.MustCompile(`RHEL_VERSION="?(?P<rhel_version>.*)"?`)
match := r.FindStringSubmatch(vContent)
if len(match) == 0 {
msg := fmt.Sprintf("No RHEL_VERSION available in /etc/os-release file: %s", vContent)
logger.Infof(msg)
return "", fmt.Errorf(msg)
}
rhelvIndex := r.SubexpIndex("rhel_version")
rhelVersion := match[rhelvIndex]
logger.Infof("Node %s RHEL_VERSION %s", n.GetName(), rhelVersion)
return rhelVersion, nil
}
// GetPrimaryPool returns the only pool owning this node
func (n *Node) GetPrimaryPool() (*MachineConfigPool, error) {
allMCPs, err := NewMachineConfigPoolList(n.oc).GetAll()
if err != nil {
return nil, err
}
var primaryPool *MachineConfigPool
for _, item := range allMCPs {
pool := item
allNodes, err := pool.getSelectedNodes("")
if err != nil {
return nil, err
}
for _, node := range allNodes {
if node.GetName() != n.GetName() {
continue
}
// We use short circuit evaluation to set the primary pool:
// - If the pool is master, it will be the primary pool;
// - If the primary pool is nil (not set yet), we set the primary pool (either worker or custom);
// - If the primary pool is not nil, we overwrite it only if the primary pool is a worker.
if pool.IsMaster() || primaryPool == nil || primaryPool.IsWorker() {
primaryPool = &pool
} else if pool.IsCustom() && primaryPool != nil && primaryPool.IsCustom() {
// Error condition: the node belongs to 2 custom pools
return nil, fmt.Errorf("Forbidden configuration. The node %s belongs to 2 custom pools: %s and %s",
node.GetName(), primaryPool.GetName(), pool.GetName())
}
}
}
return primaryPool, nil
}
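// Illustrative resolution (hypothetical pool names) of the precedence implemented above:
//   - a node selected by "master" and any other pool resolves to "master"
//   - a node selected by "worker" and one custom pool (e.g. "infra") resolves to the custom pool
//   - a node selected only by "worker" resolves to "worker"
//   - a node selected by two custom pools is reported as an error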
// GetPrimaryPoolOrFail returns the only pool owning this node and fails the test if any error happened
func (n *Node) GetPrimaryPoolOrFail() *MachineConfigPool {
pool, err := n.GetPrimaryPool()
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(),
"Error getting the pool that owns node %", n.GetName())
return pool
}
// GetPools returns a list with all the MCPs matching this node's labels. A node can be listed by more than one pool.
func (n *Node) GetPools() ([]MachineConfigPool, error) {
allPools, err := NewMachineConfigPoolList(n.oc).GetAll()
if err != nil {
return nil, err
}
nodePools := []MachineConfigPool{}
for _, mcp := range allPools {
// Get all nodes labeled for this pool
allNodes, err := mcp.getSelectedNodes("")
if err != nil {
return nil, err
}
for _, node := range allNodes {
if n.GetName() == node.GetName() {
nodePools = append(nodePools, mcp)
}
}
}
return nodePools, nil
}
// IsListedByPoolOrFail returns true if this node is listed by the MCP configured labels. If an error happens it fails the test.
func (n *Node) IsListedByPoolOrFail(mcp *MachineConfigPool) bool {
isInPool, err := n.IsListedByPool(mcp)
o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(),
"Cannot get the list of pools for node %s", n.GetName())
return isInPool
}
// IsListedByPool returns true if this node is listed by the MCP configured labels.
func (n *Node) IsListedByPool(mcp *MachineConfigPool) (bool, error) {
pools, err := n.GetPools()
if err != nil {
return false, err
}
for _, pool := range pools {
if pool.GetName() == mcp.GetName() {
return true, nil
}
}
return false, nil
}
// RemoveIPTablesRulesByRegexp removes all the iptables rules printed by `iptables -S` that match the given regexp
func (n *Node) RemoveIPTablesRulesByRegexp(regx string) ([]string, error) {
return n.removeIPTablesRulesByRegexp(false, regx)
}
// RemoveIP6TablesRulesByRegexp removes all the iptables rules printed by `ip6tables -S` that match the given regexp
func (n *Node) RemoveIP6TablesRulesByRegexp(regx string) ([]string, error) {
return n.removeIPTablesRulesByRegexp(true, regx)
}
// removeIPTablesRulesByRegexp removes all the iptables rules printed by `iptables -S` or `ip6tables -S` that match the given regexp
func (n *Node) removeIPTablesRulesByRegexp(ipv6 bool, regx string) ([]string, error) {
removedRules := []string{}
command := "iptables"
if ipv6 == true {
command = "ip6tables"
}
allRulesString, stderr, err := n.DebugNodeWithChrootStd(command, "-S")
if err != nil {
logger.Errorf("Error running `%s -S`. Stderr: %s", command, stderr)
return nil, err
}
allRules := strings.Split(allRulesString, "\n")
for _, rule := range allRules {
if !regexp.MustCompile(regx).MatchString(rule) {
continue
}
logger.Infof("%s. Removing %s rule: %s", n.GetName(), command, rule)
removeCommand := strings.Replace(rule, "-A", "-D", 1)
output, err := n.DebugNodeWithChroot(append([]string{command}, splitCommandString(removeCommand)...)...)
// OCPQE-20258: if the rule is already removed, the retry will fail as well. Add this logic to catch this error.
// if the error message indicates the rule does not exist, i.e. it's already removed, we consider the retry succeeded
alreadyRemoved := strings.Contains(output, "does a matching rule exist in that chain")
if alreadyRemoved {
logger.Warnf("iptable rule %s is already removed", rule)
}
if err != nil && !alreadyRemoved {
logger.Errorf("Output: %s", output)
return removedRules, err
}
removedRules = append(removedRules, rule)
}
return removedRules, err
}
// execIPTables executes the iptables commands in the node. The "rules" param is a list of iptables commands to be executed. Each string is a full command.
//
// for example:
// [ "-A OPENSHIFT-BLOCK-OUTPUT -p tcp -m tcp --dport 22623 --tcp-flags FIN,SYN,RST,ACK SYN -j REJECT --reject-with icmp-port-unreachable",
// "-A OPENSHIFT-BLOCK-OUTPUT -p tcp -m tcp --dport 22624 --tcp-flags FIN,SYN,RST,ACK SYN -j REJECT --reject-with icmp-port-unreachable" ]
//
// This function can be used to restore the rules removed by "RemoveIPTablesRulesByRegexp"
func (n *Node) execIPTables(ipv6 bool, rules []string) error {
command := "iptables"
if ipv6 {
command = "ip6tables"
}
for _, rule := range rules {
logger.Infof("%s. Adding %s rule: %s", n.GetName(), command, rule)
output, err := n.DebugNodeWithChroot(append([]string{command}, splitCommandString(rule)...)...)
if err != nil {
logger.Errorf("Output: %s", output)
return err
}
}
return nil
}
// ExecIPTables executes the iptables commands in the node
func (n *Node) ExecIPTables(rules []string) error {
return n.execIPTables(false, rules)
}
// ExecIP6Tables executes the ip6tables commands in the node
func (n *Node) ExecIP6Tables(rules []string) error {
return n.execIPTables(true, rules)
}
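// Illustrative sketch, not part of the original helpers: RemoveIPTablesRulesByRegexp and
// ExecIPTables are meant to be used as a pair, temporarily removing rules and restoring them
// afterwards. The "22623" regexp is only an example.
func exampleBlockAndRestoreIPTables(node *Node) error {
	removedRules, err := node.RemoveIPTablesRulesByRegexp("22623")
	if err != nil {
		return err
	}
	// Always restore the removed rules, even if the test logic fails
	defer func() {
		if restoreErr := node.ExecIPTables(removedRules); restoreErr != nil {
			logger.Errorf("Error restoring the iptables rules in node %s: %s", node.GetName(), restoreErr)
		}
	}()
	// ... test logic that needs the matching rules to be absent ...
	return nil
}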
// GetArchitecture gets the architecture used in the node
func (n Node) GetArchitecture() (architecture.Architecture, error) {
arch, err := n.Get(`{.status.nodeInfo.architecture}`)
if err != nil {
return architecture.UNKNOWN, err
}
return architecture.FromString(arch), nil
}
// GetArchitectureOrFail gets the architecture used in the node and fails the test if any error happens while doing it
func (n Node) GetArchitectureOrFail() architecture.Architecture {
arch, err := n.GetArchitecture()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the architecture of node %s", n.GetName())
return arch
}
// GetJournalLogs returns the journal logs
func (n *Node) GetJournalLogs(args ...string) (string, error) {
cmd := []string{"journalctl", "-o", "with-unit"}
return n.DebugNodeWithChroot(append(cmd, args...)...)
}
// GetMachineConfigNode returns the MachineConfigNode resource linked to this node
func (n *Node) GetMachineConfigNode() *MachineConfigNode {
return NewMachineConfigNode(n.oc.AsAdmin(), n.GetName())
}
// GetFileSystemSpaceUsage returns the file system space usage for the given path in the node.
// It parses the output of this command:
// $ df -B1 --output=used,avail /
//
// Used Avail
//
// 38409719808 7045369856
func (n *Node) GetFileSystemSpaceUsage(path string) (*SpaceUsage, error) {
var (
parserRegex = `(?P<Used>\d+)\D+(?P<Avail>\d+)`
)
stdout, stderr, err := n.DebugNodeWithChrootStd("df", "-B1", "--output=used,avail", path)
if err != nil {
logger.Errorf("Error getting the disk usage in node %s:\nstdout:%s\nstderr:%s",
n.GetName(), stdout, stderr)
return nil, err
}
lines := strings.Split(stdout, "\n")
if len(lines) != 2 {
return nil, fmt.Errorf("Expected 2 lines, and got:\n%s", stdout)
}
logger.Debugf("parsing: %s", lines[1])
re := regexp.MustCompile(parserRegex)
match := re.FindStringSubmatch(strings.TrimSpace(lines[1]))
logger.Infof("matched disk space stat info: %v", match)
// check whether matched string found
if len(match) == 0 {
return nil, fmt.Errorf("no disk space info matched")
}
usedIndex := re.SubexpIndex("Used")
if usedIndex < 0 {
return nil, fmt.Errorf("Could not parse Used bytes from\n%s", stdout)
}
used, err := strconv.ParseInt(match[usedIndex], 10, 64)
if err != nil {
return nil, fmt.Errorf("Could convert parsed Used data [%s] into float64 from\n%s", match[usedIndex], stdout)
}
availIndex := re.SubexpIndex("Avail")
if availIndex < 0 {
return nil, fmt.Errorf("Could not parse Avail bytes from\n%s", stdout)
}
avail, err := strconv.ParseInt(match[availIndex], 10, 64)
if err != nil {
return nil, fmt.Errorf("Could convert parsed Avail data [%s] into float64 from\n%s", match[availIndex], stdout)
}
return &SpaceUsage{Used: used, Avail: avail}, nil
}
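// Illustrative sketch, not part of the original helpers: a minimal example of consuming the
// SpaceUsage values returned by GetFileSystemSpaceUsage. The "/var" path is only an example.
func exampleLogDiskUsage(node *Node) error {
	usage, err := node.GetFileSystemSpaceUsage("/var")
	if err != nil {
		return err
	}
	total := usage.Used + usage.Avail
	if total == 0 {
		return fmt.Errorf("df reported 0 bytes for node %s", node.GetName())
	}
	logger.Infof("Node %s: %d bytes used, %d bytes available (%.2f%% used)",
		node.GetName(), usage.Used, usage.Avail, float64(usage.Used)*100/float64(total))
	return nil
}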
// GetMachine returns the machine used to create this node
func (n Node) GetMachine() (*Machine, error) {
machineLabel, err := n.GetAnnotation("machine.openshift.io/machine")
if err != nil {
return nil, err
}
machineLabelSplit := strings.Split(machineLabel, "/")
if len(machineLabelSplit) != 2 {
return nil, fmt.Errorf("Malformed machine label %s in node %s", machineLabel, n.GetName())
}
machineName := machineLabelSplit[1]
return NewMachine(n.GetOC(), "openshift-machine-api", machineName), nil
}
// CanUseDnfDownload returns true if dnf can use the download subcommand
func (n Node) CanUseDnfDownload() (bool, error) {
out, err := n.DebugNodeWithChroot("dnf", "download", "--help")
if err != nil {
if strings.Contains(out, "No such command:") || strings.Contains(out, "No such file or directory") {
return false, nil
}
return false, err
}
return true, nil
}
// DnfDownload uses the "dnf download" command to download an RPM package. It returns the full path of the downloaded package
func (n Node) DnfDownload(pkg, dir string) (string, error) {
out, err := n.DebugNodeWithChroot("dnf", "download", pkg, "--destdir", dir)
logger.Infof("Download output: %s", out)
if err != nil {
return "", err
}
expr := `(?P<package>(?m)^` + pkg + `.*rpm)`
r := regexp.MustCompile(expr)
match := r.FindStringSubmatch(out)
if len(match) == 0 {
msg := fmt.Sprintf("Wrong download output. Cannot extract the name of the downloaded package using expresion %s:\n%s", expr, out)
logger.Errorf(msg)
return "", fmt.Errorf(msg)
}
pkgNameIndex := r.SubexpIndex("package")
pkgName := match[pkgNameIndex]
fullPkgName := path.Join(dir, pkgName)
logger.Infof("Downloaded package: %s", fullPkgName)
return fullPkgName, nil
}
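// Illustrative sketch, not part of the original helpers: check that "dnf download" is available
// before trying to download a package. The package name and destination directory are only examples.
func exampleDownloadRpm(node Node) (string, error) {
	canDownload, err := node.CanUseDnfDownload()
	if err != nil {
		return "", err
	}
	if !canDownload {
		return "", fmt.Errorf("dnf download is not available in node %s", node.GetName())
	}
	return node.DnfDownload("zsh", "/tmp")
}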
// OSReset returns the node's OS to its original state, removing any modifications applied to it
func (n Node) OSReset() error {
logger.Infof("Resetting the OS in node %s", n)
_, err := n.DebugNodeWithChroot("rpm-ostree", "reset")
return err
}
// GetAll returns a []Node list with all existing nodes
func (nl *NodeList) GetAll() ([]Node, error) {
allNodeResources, err := nl.ResourceList.GetAll()
if err != nil {
return nil, err
}
allNodes := make([]Node, 0, len(allNodeResources))
for _, nodeRes := range allNodeResources {
allNodes = append(allNodes, *NewNode(nl.oc, nodeRes.name))
}
return allNodes, nil
}
// GetAllLinux returns a list with all Linux nodes in the cluster
func (nl NodeList) GetAllLinux() ([]Node, error) {
nl.ByLabel("kubernetes.io/os=linux")
return nl.GetAll()
}
// GetAllMasterNodes returns a list of master Nodes
func (nl NodeList) GetAllMasterNodes() ([]Node, error) {
nl.ByLabel("node-role.kubernetes.io/master=")
return nl.GetAll()
}
// GetAllWorkerNodes returns a list of worker Nodes
func (nl NodeList) GetAllWorkerNodes() ([]Node, error) {
nl.ByLabel("node-role.kubernetes.io/worker=")
return nl.GetAll()
}
// GetAllMasterNodesOrFail returns a list of master Nodes
func (nl NodeList) GetAllMasterNodesOrFail() []Node {
masters, err := nl.GetAllMasterNodes()
o.Expect(err).NotTo(o.HaveOccurred())
return masters
}
// GetAllWorkerNodesOrFail returns a list of worker Nodes. Fail the test case if an error happens.
func (nl NodeList) GetAllWorkerNodesOrFail() []Node {
workers, err := nl.GetAllWorkerNodes()
o.Expect(err).NotTo(o.HaveOccurred())
return workers
}
// GetAllLinuxWorkerNodes returns a list of linux worker Nodes
func (nl NodeList) GetAllLinuxWorkerNodes() ([]Node, error) {
nl.ByLabel("node-role.kubernetes.io/worker=,kubernetes.io/os=linux")
return nl.GetAll()
}
// GetAllLinuxWorkerNodesOrFail returns a list of linux worker Nodes. Fail the test case if an error happens.
func (nl NodeList) GetAllLinuxWorkerNodesOrFail() []Node {
workers, err := nl.GetAllLinuxWorkerNodes()
o.Expect(err).NotTo(o.HaveOccurred())
return workers
}
// GetAllRhelWokerNodesOrFail returns a list with all RHEL worker nodes in the cluster. Fail the test if an error happens.
func (nl NodeList) GetAllRhelWokerNodesOrFail() []Node {
nl.ByLabel("node-role.kubernetes.io/worker=,node.openshift.io/os_id=rhel")
workers, err := nl.GetAll()
o.Expect(err).NotTo(o.HaveOccurred())
return workers
}
// GetAllCoreOsWokerNodesOrFail returns a list with all CoreOS worker nodes in the cluster. Fail the test case if an error happens.
func (nl NodeList) GetAllCoreOsWokerNodesOrFail() []Node {
nl.ByLabel("node-role.kubernetes.io/worker=,node.openshift.io/os_id=rhcos")
workers, err := nl.GetAll()
o.Expect(err).NotTo(o.HaveOccurred())
return workers
}
// GetAllCoreOsNodesOrFail returns a list with all CoreOs nodes including master and workers. Fail the test case if an error happens
func (nl NodeList) GetAllCoreOsNodesOrFail() []Node {
nl.ByLabel("node.openshift.io/os_id=rhcos")
allRhcos, err := nl.GetAll()
o.Expect(err).NotTo(o.HaveOccurred())
return allRhcos
}
// GetTaintedNodes returns a list with all tainted nodes in the cluster. Fail the test if an error happens.
func (nl *NodeList) GetTaintedNodes() []Node {
allNodes, err := nl.GetAll()
o.Expect(err).NotTo(o.HaveOccurred())
taintedNodes := []Node{}
for _, node := range allNodes {
if node.IsTainted() {
taintedNodes = append(taintedNodes, node)
}
}
return taintedNodes
}
// GetAllDegraded returns a list with all the nodes reporting a machineconfig Degraded state
// metadata.annotations.machineconfiguration\.openshift\.io/state=="Degraded"
func (nl NodeList) GetAllDegraded() ([]Node, error) {
filter := `?(@.metadata.annotations.machineconfiguration\.openshift\.io/state=="Degraded")`
nl.SetItemsFilter(filter)
return nl.GetAll()
}
// McStateSnapshot gets a snapshot of the machine config state for the nodes in this list
// the output is like `Working Done Done`
func (nl NodeList) McStateSnapshot() string {
return nl.GetOrFail(`{.items[*].metadata.annotations.machineconfiguration\.openshift\.io/state}`)
}
// quietSetNamespacePrivileged invokes exutil.SetNamespacePrivileged but disables the log output to avoid noise in the logs
func quietSetNamespacePrivileged(oc *exutil.CLI, namespace string) error {
oc.NotShowInfo()
defer oc.SetShowInfo()
logger.Debugf("Setting namespace %s as privileged", namespace)
return exutil.SetNamespacePrivileged(oc, namespace)
}
// quietRecoverNamespaceRestricted invokes exutil.RecoverNamespaceRestricted but disables the log output to avoid noise in the logs
func quietRecoverNamespaceRestricted(oc *exutil.CLI, namespace string) error {
oc.NotShowInfo()
defer oc.SetShowInfo()
logger.Debugf("Recovering namespace %s from privileged", namespace)
return exutil.RecoverNamespaceRestricted(oc, namespace)
}
// BreakRebootInNode breaks the reboot process in a node, so that errors happen when the node is rebooted
func BreakRebootInNode(node *Node) error {
logger.Infof("Breaking reboot process in node %s", node.GetName())
_, err := node.DebugNodeWithChroot("sh", "-c", "mount -o remount,rw /usr; mv /usr/bin/systemd-run /usr/bin/systemd-run2")
return err
}
// FixRebootInNode fixes the problem that the BreakRebootInNode function created in a node
func FixRebootInNode(node *Node) error {
logger.Infof("Fixing reboot process in node %s", node.GetName())
_, err := node.DebugNodeWithChroot("sh", "-c", "mount -o remount,rw /usr; mv /usr/bin/systemd-run2 /usr/bin/systemd-run")
return err
}
// BreakRebaseInNode breaks the rpm-ostree rebase subcommand in a node, so that errors happen when the node tries to rebase
func BreakRebaseInNode(node *Node) error {
logger.Infof("Breaking rpm-ostree rebase process in node %s", node.GetName())
brokenRpmOstree := generateTemplateAbsolutePath("rpm-ostree-force-pivot-error.sh")
if err := node.CopyFromLocal(brokenRpmOstree, "/tmp/rpm-ostree.broken"); err != nil {
	return err
}
// The "-Z" option is very important when we replace the original rpm-ostree file, because it sets the default SELinux context for the file.
// If we don't use the "-Z" option and the file has the wrong SELinux type, the test will fail because the "rpm-ostree kargs" command cannot succeed.
_, err := node.DebugNodeWithChroot("sh", "-c", "mount -o remount,rw /usr; mv /usr/bin/rpm-ostree /usr/bin/rpm-ostree2 && mv -Z /tmp/rpm-ostree.broken /usr/bin/rpm-ostree && chmod +x /usr/bin/rpm-ostree")
return err
}
// FixRebaseInNode fixes the problem that the BreakRebaseInNode function created in a node
func FixRebaseInNode(node *Node) error {
logger.Infof("Fixing rpm-ostree rebase process in node %s", node.GetName())
_, err := node.DebugNodeWithChroot("sh", "-c", "mount -o remount,rw /usr; mv /usr/bin/rpm-ostree2 /usr/bin/rpm-ostree")
return err
}
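// Illustrative sketch, not part of the original helpers: the Break*/Fix* functions above are
// intended to be paired, breaking the node behaviour for a negative test and always restoring it.
func exampleBreakAndFixReboot(node *Node) error {
	if err := BreakRebootInNode(node); err != nil {
		return err
	}
	// Always restore the reboot process, even if the test logic fails
	defer func() {
		if fixErr := FixRebootInNode(node); fixErr != nil {
			logger.Errorf("Error fixing the reboot process in node %s: %s", node.GetName(), fixErr)
		}
	}()
	// ... trigger a configuration change and verify that the reboot failure is reported ...
	return nil
}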
// FilterSchedulableNodesOrFail removes from a list of nodes the nodes that are not schedulable
func FilterSchedulableNodesOrFail(nodes []Node) []Node {
returnNodes := []Node{}
for _, item := range nodes {
node := item
if node.IsSchedulableOrFail() {
returnNodes = append(returnNodes, node)
}
}
return returnNodes
}
// GetOperatorNode returns the node running the MCO operator pod
func GetOperatorNode(oc *exutil.CLI) (*Node, error) {
podsList := NewNamespacedResourceList(oc.AsAdmin(), "pods", MachineConfigNamespace)
podsList.ByLabel("k8s-app=machine-config-operator")
mcoPods, err := podsList.GetAll()
if err != nil {
return nil, err
}
if len(mcoPods) != 1 {
return nil, fmt.Errorf("There should be 1 and only 1 MCO operator pod. Found operator pods: %s", mcoPods)
}
nodeName, err := mcoPods[0].Get(`{.spec.nodeName}`)
if err != nil {
return nil, err
}
return NewNode(oc, nodeName), nil
}
// ConfigureStreamCentosRepo configures a centos repo in the node using the provided stream
func ConfigureStreamCentosRepo(n Node, stream string) error {
var (
centosStreamTemplate = generateTemplateAbsolutePath("centos_stream.repo")
centosStreamRepoFile = "/etc/yum.repos.d/mco-test-centos.repo"
dnfVarDir = "/etc/dnf/vars/"
streamVarRemoteFile = NewRemoteFile(n, path.Join(dnfVarDir, "stream"))
proxy = NewResource(n.GetOC().AsAdmin(), "proxy", "cluster")
)
logger.Infof("Creating necessary repo files")
err := n.CopyFromLocal(centosStreamTemplate, centosStreamRepoFile)
if err != nil {
return err
}
_, err = n.DebugNodeWithChroot("mkdir", "-p", dnfVarDir)
if err != nil {
logger.Errorf("Error creating the dnf var directory %s: %s", dnfVarDir, err)
return err
}
err = streamVarRemoteFile.Create([]byte(stream), 0o600)
if err != nil {
logger.Errorf("Error writing the content <%s> in the dnf var file %s: %s", stream, streamVarRemoteFile, err)
return err
}
logger.Infof("Configuring proxy settings")
httpProxy, err := proxy.Get(`{.status.httpProxy}`)
if err != nil {
logger.Errorf("Error getting the httpProxy value")
return err
}
_, err = n.DebugNodeWithChroot("sed", "-i", "s#proxy=#proxy="+httpProxy+"#g", centosStreamRepoFile)
if err != nil {
logger.Errorf("Error configuring proxy in the centos repo file")
return err
}
return err
}
// RemoveConfiguredStreamCentosRepo removes the configuration added by ConfigureStreamCentosRepo, restoring the original state
func RemoveConfiguredStreamCentosRepo(n Node) error {
logger.Infof("Remoing the stream centos repo configuration")
_, err := n.DebugNodeWithChroot("rm", "/etc/yum.repos.d/mco-test-centos.repo", "/etc/dnf/vars/stream")
return err
}
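// Illustrative sketch, not part of the original helpers: configure the centos stream repo and make
// sure the configuration is always cleaned up afterwards. The "9-stream" value is only an example.
func exampleWithCentosStreamRepo(node Node) error {
	if err := ConfigureStreamCentosRepo(node, "9-stream"); err != nil {
		return err
	}
	defer func() {
		if cleanErr := RemoveConfiguredStreamCentosRepo(node); cleanErr != nil {
			logger.Errorf("Error removing the centos stream repo configuration in node %s: %s", node.GetName(), cleanErr)
		}
	}()
	// ... install test packages from the configured repository ...
	return nil
}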
|
package mco
| ||||
function
|
openshift/openshift-tests-private
|
00665f10-125b-4a01-ae07-8853f170b861
|
NewNode
|
['"time"']
|
['Node']
|
github.com/openshift/openshift-tests-private/test/extended/mco/node.go
|
func NewNode(oc *exutil.CLI, name string) *Node {
return &Node{*NewResource(oc, "node", name), time.Time{}}
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
fdfe19ae-abc2-42ac-9f1a-8bbfb11020f9
|
NewNodeList
|
['NodeList']
|
github.com/openshift/openshift-tests-private/test/extended/mco/node.go
|
func NewNodeList(oc *exutil.CLI) *NodeList {
return &NodeList{*NewResourceList(oc, "node")}
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
64d26318-c223-4fb8-869c-f786ad465b87
|
String
|
['Node']
|
github.com/openshift/openshift-tests-private/test/extended/mco/node.go
|
func (n Node) String() string {
return n.GetName()
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
2bea5b1d-b52a-476a-889d-56a01e433af5
|
DebugNodeWithChroot
|
['Node']
|
github.com/openshift/openshift-tests-private/test/extended/mco/node.go
|
func (n *Node) DebugNodeWithChroot(cmd ...string) (string, error) {
var (
out string
err error
numRetries = 3
)
n.oc.NotShowInfo()
defer n.oc.SetShowInfo()
for i := 0; i < numRetries; i++ {
if i > 0 {
logger.Infof("Error happened: %s.\nRetrying command. Num retries: %d", err, i)
}
out, err = exutil.DebugNodeWithChroot(n.oc, n.name, cmd...)
if err == nil {
return out, nil
}
}
return out, err
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
47968d19-3e48-4aab-87ca-6f0b6d4481ef
|
DebugNodeWithChrootStd
|
['Node']
|
github.com/openshift/openshift-tests-private/test/extended/mco/node.go
|
func (n *Node) DebugNodeWithChrootStd(cmd ...string) (string, string, error) {
var (
stdout string
stderr string
err error
numRetries = 3
)
setErr := quietSetNamespacePrivileged(n.oc, n.oc.Namespace())
if setErr != nil {
return "", "", setErr
}
cargs := []string{"node/" + n.GetName(), "--", "chroot", "/host"}
cargs = append(cargs, cmd...)
for i := 0; i < numRetries; i++ {
if i > 0 {
logger.Infof("Error happened: %s.\nRetrying command. Num retries: %d", err, i)
}
stdout, stderr, err = n.oc.Run("debug").Args(cargs...).Outputs()
if err == nil {
return stdout, stderr, nil
}
}
recErr := quietRecoverNamespaceRestricted(n.oc, n.oc.Namespace())
if recErr != nil {
return "", "", recErr
}
return stdout, stderr, err
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
c1dc2023-93e4-4a28-b572-76dd5741b701
|
DebugNodeWithOptions
|
['Node']
|
github.com/openshift/openshift-tests-private/test/extended/mco/node.go
|
func (n *Node) DebugNodeWithOptions(options []string, cmd ...string) (string, error) {
var (
out string
err error
numRetries = 3
)
for i := 0; i < numRetries; i++ {
if i > 0 {
logger.Infof("Error happened: %s.\nRetrying command. Num retries: %d", err, i)
}
out, err = exutil.DebugNodeWithOptions(n.oc, n.name, options, cmd...)
if err == nil {
return out, nil
}
}
return out, err
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
2297ceff-5870-406c-bb3c-5365d64b1eea
|
DebugNode
|
['Node']
|
github.com/openshift/openshift-tests-private/test/extended/mco/node.go
|
func (n *Node) DebugNode(cmd ...string) (string, error) {
return exutil.DebugNode(n.oc, n.name, cmd...)
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
e5f4f51f-ab58-46af-95ad-46b3d851fe58
|
DeleteLabel
|
['Node']
|
github.com/openshift/openshift-tests-private/test/extended/mco/node.go
|
func (n *Node) DeleteLabel(label string) (string, error) {
logger.Infof("Delete label %s from node %s", label, n.GetName())
return exutil.DeleteLabelFromNode(n.oc, n.name, label)
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
0f2f8331-3006-4bbb-bdf7-090408538fb2
|
WaitForLabelRemoved
|
['"context"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"']
|
['Node']
|
github.com/openshift/openshift-tests-private/test/extended/mco/node.go
|
func (n *Node) WaitForLabelRemoved(label string) error {
logger.Infof("Waiting for label %s to be removed from node %s", label, n.GetName())
immediate := true
waitErr := wait.PollUntilContextTimeout(context.TODO(), 1*time.Minute, 10*time.Minute, immediate, func(_ context.Context) (bool, error) {
labels, err := n.Get(`{.metadata.labels}`)
if err != nil {
logger.Infof("Error waiting for labels to be removed:%v, and try next round", err)
return false, nil
}
labelsMap := JSON(labels)
label, err := labelsMap.GetSafe(label)
if err == nil && !label.Exists() {
logger.Infof("Label %s has been removed from node %s", label, n.GetName())
return true, nil
}
return false, nil
})
if waitErr != nil {
logger.Errorf("Timeout while waiting for label %s to be delete from node %s. Error: %s",
label,
n.GetName(),
waitErr)
}
return waitErr
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
c23a1a1c-f2d4-46cf-a488-177bea6600fe
|
GetMachineConfigDaemon
|
['Node']
|
github.com/openshift/openshift-tests-private/test/extended/mco/node.go
|
func (n *Node) GetMachineConfigDaemon() string {
machineConfigDaemon, err := exutil.GetPodName(n.oc, "openshift-machine-config-operator", "k8s-app=machine-config-daemon", n.name)
o.Expect(err).NotTo(o.HaveOccurred())
return machineConfigDaemon
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
354d57c1-623f-4f3f-b68b-d4efe4ddc8a7
|
GetNodeHostname
|
['Node']
|
github.com/openshift/openshift-tests-private/test/extended/mco/node.go
|
func (n *Node) GetNodeHostname() (string, error) {
return exutil.GetNodeHostname(n.oc, n.name)
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
ac1bae06-cefa-4d88-a15a-d8f90978ca62
|
ForceReapplyConfiguration
|
['Node']
|
github.com/openshift/openshift-tests-private/test/extended/mco/node.go
|
func (n *Node) ForceReapplyConfiguration() error {
logger.Infof("Forcing reapply configuration in node %s", n.GetName())
_, err := n.DebugNodeWithChroot("touch", "/run/machine-config-daemon-force")
return err
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
d4a4a2b7-042f-496b-aaa1-ef39fc210d7a
|
GetUnitStatus
|
['Node']
|
github.com/openshift/openshift-tests-private/test/extended/mco/node.go
|
func (n *Node) GetUnitStatus(unitName string) (string, error) {
return n.DebugNodeWithChroot("systemctl", "status", unitName)
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
66052d56-d265-4048-ac5a-a4a574b7e6c2
|
UnmaskService
|
['Node']
|
github.com/openshift/openshift-tests-private/test/extended/mco/node.go
|
func (n *Node) UnmaskService(svcName string) (string, error) {
return n.DebugNodeWithChroot("systemctl", "unmask", svcName)
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
91a37ff7-2949-441f-90ff-7fda5a293da8
|
GetUnitProperties
|
['Node']
|
github.com/openshift/openshift-tests-private/test/extended/mco/node.go
|
func (n *Node) GetUnitProperties(unitName string, args ...string) (string, error) {
cmd := append([]string{"systemctl", "show", unitName}, args...)
stdout, _, err := n.DebugNodeWithChrootStd(cmd...)
return stdout, err
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
273ca471-f49e-4775-8089-4065b64c0af3
|
GetUnitActiveEnterTime
|
['"fmt"', '"regexp"', '"strconv"', '"time"']
|
['Node']
|
github.com/openshift/openshift-tests-private/test/extended/mco/node.go
|
func (n *Node) GetUnitActiveEnterTime(unitName string) (time.Time, error) {
cmdOut, err := n.GetUnitProperties(unitName, "--timestamp=unix", "-P", "ActiveEnterTimestamp")
if err != nil {
return time.Time{}, err
}
logger.Infof("Active enter time output: [%s]", cmdOut)
// The output should have this format
// sh-5.1# systemctl show crio.service --timestamp=unix -P ActiveEnterTimestamp
// @1709918801
r := regexp.MustCompile(`^\@(?P<unix_timestamp>[0-9]+)$`)
match := r.FindStringSubmatch(cmdOut)
if len(match) == 0 {
msg := fmt.Sprintf("Wrong property format. Expected a format like '@1709918801', but got '%s'", cmdOut)
logger.Infof(msg)
return time.Time{}, fmt.Errorf(msg)
}
unixTimeIndex := r.SubexpIndex("unix_timestamp")
unixTime := match[unixTimeIndex]
iUnixTime, err := strconv.ParseInt(unixTime, 10, 64)
if err != nil {
return time.Time{}, err
}
activeEnterTime := time.Unix(iUnixTime, 0)
logger.Infof("Unit %s ActiveEnterTimestamp %s", unitName, activeEnterTime)
return activeEnterTime, nil
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
0628babb-76c3-4393-8258-63648105b67c
|
GetUnitExecReloadStartTime
|
['"fmt"', '"regexp"', '"strconv"', '"strings"', '"time"']
|
['Node']
|
github.com/openshift/openshift-tests-private/test/extended/mco/node.go
|
func (n *Node) GetUnitExecReloadStartTime(unitName string) (time.Time, error) {
cmdOut, err := n.GetUnitProperties(unitName, "--timestamp=unix", "-P", "ExecReload")
if err != nil {
return time.Time{}, err
}
logger.Infof("Reload start time output: [%s]", cmdOut)
// The output should contain a start_time field with this format
// sh-5.1# systemctl show crio.service --timestamp=unix -P ExecReload
// ... start_time=[@1709918801] ... (or start_time=[n/a] if the unit was never reloaded)
r := regexp.MustCompile(`start_time=\[(?P<unix_timestamp>@[0-9]+|n\/a)\]`)
match := r.FindStringSubmatch(cmdOut)
if len(match) == 0 {
msg := fmt.Sprintf("Wrong property format. Expected a format like 'start_time=[@1709918801]', but got '%s'", cmdOut)
logger.Infof(msg)
return time.Time{}, fmt.Errorf(msg)
}
unixTimeIndex := r.SubexpIndex("unix_timestamp")
unixTime := match[unixTimeIndex]
if unixTime == "n/a" {
logger.Infof("Crio was never reloaded. Reload Start Time = %s", unixTime)
return time.Time{}, nil
}
iUnixTime, err := strconv.ParseInt(strings.Replace(unixTime, "@", "", 1), 10, 64)
if err != nil {
return time.Time{}, err
}
activeEnterTime := time.Unix(iUnixTime, 0)
logger.Infof("Unit %s ExecReload start time %s", unitName, activeEnterTime)
return activeEnterTime, nil
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
614e727e-7074-47de-935f-3055fa61ff7c
|
GetUnitDependencies
|
['Node']
|
github.com/openshift/openshift-tests-private/test/extended/mco/node.go
|
func (n *Node) GetUnitDependencies(unitName string, opts ...string) (string, error) {
options := []string{"systemctl", "list-dependencies", unitName}
if len(opts) > 0 {
options = append(options, opts...)
}
return n.DebugNodeWithChroot(options...)
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
58e43a5b-09e5-42b7-afd6-b1a925407b75
|
IsUnitActive
|
['Node']
|
github.com/openshift/openshift-tests-private/test/extended/mco/node.go
|
func (n *Node) IsUnitActive(unitName string) bool {
output, _, err := n.DebugNodeWithChrootStd("systemctl", "is-active", unitName)
if err != nil {
logger.Errorf("Get unit state for %s failed: %v", unitName, err)
return false
}
logger.Infof("Unit %s state is: %s", unitName, output)
return output == "active"
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
b9724f09-0573-4b03-b07f-832556cf24f6
|
IsUnitEnabled
|
['"strings"']
|
['Node']
|
github.com/openshift/openshift-tests-private/test/extended/mco/node.go
|
func (n *Node) IsUnitEnabled(unitName string) bool {
output, _, err := n.DebugNodeWithChrootStd("systemctl", "is-enabled", unitName)
if err != nil {
logger.Errorf("Get unit enablement state for %s failed: %v", unitName, err)
return false
}
logger.Infof("Unit %s enablement state is: %s ", unitName, output)
return strings.HasPrefix(output, "enabled")
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
f9ccaa91-69f7-4f44-9329-c7ba63cadbf3
|
GetRpmOstreeStatus
|
['Node']
|
github.com/openshift/openshift-tests-private/test/extended/mco/node.go
|
func (n Node) GetRpmOstreeStatus(asJSON bool) (string, error) {
args := []string{"rpm-ostree", "status"}
if asJSON {
args = append(args, "--json")
}
stringStatus, _, err := n.DebugNodeWithChrootStd(args...)
logger.Debugf("json rpm-ostree status:\n%s", stringStatus)
return stringStatus, err
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
5c3d2e84-1753-4a58-bad7-e6461f85d026
|
GetBootedOsTreeDeployment
|
['"strings"']
|
['Node']
|
github.com/openshift/openshift-tests-private/test/extended/mco/node.go
|
func (n Node) GetBootedOsTreeDeployment(asJSON bool) (string, error) {
if asJSON {
stringStatus, err := n.GetRpmOstreeStatus(true)
if err != nil {
return "", err
}
deployments := JSON(stringStatus).Get("deployments")
for _, item := range deployments.Items() {
booted := item.Get("booted").ToBool()
if booted {
return item.AsJSONString()
}
}
} else {
stringStatus, err := n.GetRpmOstreeStatus(false)
if err != nil {
return "", err
}
deployments := strings.Split(stringStatus, "\n\n")
for _, deployment := range deployments {
if strings.Contains(deployment, "*") {
return deployment, nil
}
}
}
logger.Infof("WARNING! No booted deployment found in node %s", n.GetName())
return "", nil
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
f23cf788-0093-4e15-aca2-ae2b97184630
|
GetCurrentBootOSImage
|
['"fmt"', '"path"', '"strconv"', '"strings"']
|
['Node']
|
github.com/openshift/openshift-tests-private/test/extended/mco/node.go
|
func (n Node) GetCurrentBootOSImage() (string, error) {
deployment, err := n.GetBootedOsTreeDeployment(true)
if err != nil {
return "", fmt.Errorf("Error getting the rpm-ostree status value.\n%s", err)
}
containerRef, jerr := JSON(deployment).GetSafe("container-image-reference")
if jerr != nil {
return "", fmt.Errorf("We cant get 'container-image-reference' from the deployment status. Wrong rpm-ostree status!.\n%s\n%s", jerr, deployment)
}
logger.Infof("Current booted container-image-reference: %s", containerRef)
imageSplit := strings.Split(containerRef.ToString(), ":")
lenImageSplit := len(imageSplit)
if lenImageSplit < 2 {
return "", fmt.Errorf("Wrong container-image-reference in deployment:\n%s\n%s", err, deployment)
}
// remove the "ostree-unverified-registry:" part of the image
// remove the "containers-storage:" part of the image
// it can have these modifiers: ostree-unverified-image:containers-storage:quay.io/openshift-.....
// we need to take into account this kind of images too -> ostree-unverified-registry:image-registry.openshift-image-registry.svc:5000/openshift-machine-config-operator/ocb-worker-image@sha256:da29d9033c...
image := imageSplit[lenImageSplit-2] + ":" + imageSplit[lenImageSplit-1]
// we need to check if the image includes the port too
if lenImageSplit > 2 {
_, err := strconv.Atoi(strings.Split(image, "/")[0])
// the image URL includes the port. It is in the format my.domain:port/my/path
if err == nil {
image = imageSplit[lenImageSplit-3] + ":" + image
}
}
image = strings.TrimSpace(image)
logger.Infof("Booted image: %s", image)
return image, nil
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
790c50c6-2ac7-41a9-8be0-3cff28189b26
|
Cordon
|
['Node']
|
github.com/openshift/openshift-tests-private/test/extended/mco/node.go
|
func (n *Node) Cordon() error {
return n.oc.Run("adm").Args("cordon", n.GetName()).Execute()
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
057e3ac5-e703-40cb-8f11-39e8c5cc72b6
|
Uncordon
|
['Node']
|
github.com/openshift/openshift-tests-private/test/extended/mco/node.go
|
func (n *Node) Uncordon() error {
return n.oc.Run("adm").Args("uncordon", n.GetName()).Execute()
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
2ef101eb-2309-4c02-8072-13a7309f3830
|
IsCordoned
|
['Node']
|
github.com/openshift/openshift-tests-private/test/extended/mco/node.go
|
func (n *Node) IsCordoned() (bool, error) {
key, err := n.Get(`{.spec.taints[?(@.key=="node.kubernetes.io/unschedulable")].key}`)
if err != nil {
return false, err
}
return key != "", nil
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
8510a307-9689-418f-9371-6c5b74724441
|
IsCordonedOrFail
|
['Node']
|
github.com/openshift/openshift-tests-private/test/extended/mco/node.go
|
func (n *Node) IsCordonedOrFail() bool {
isCordoned, err := n.IsCordoned()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the tains in node %s", n.GetName())
return isCordoned
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
d3441bff-0f2a-420a-9bec-70fda100470f
|
RestoreDesiredConfig
|
['"fmt"']
|
['Node']
|
github.com/openshift/openshift-tests-private/test/extended/mco/node.go
|
func (n *Node) RestoreDesiredConfig() error {
currentConfig := n.GetCurrentMachineConfig()
if currentConfig == "" {
return fmt.Errorf("currentConfig annotation has an empty value in node %s", n.GetName())
}
logger.Infof("Node: %s. Restoring desiredConfig value to match currentConfig value: %s", n.GetName(), currentConfig)
currentImage := n.GetCurrentImage()
if currentImage == "" {
return n.PatchDesiredConfig(currentConfig)
}
logger.Infof("Node: %s. Restoring desiredImage value to match currentImage value: %s", n.GetName(), currentImage)
return n.PatchDesiredConfigAndDesiredImage(currentConfig, currentImage)
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
8a060806-d742-439f-beb1-9ff86667339d
|
GetCurrentMachineConfig
|
['Node']
|
github.com/openshift/openshift-tests-private/test/extended/mco/node.go
|
func (n Node) GetCurrentMachineConfig() string {
return n.GetOrFail(`{.metadata.annotations.machineconfiguration\.openshift\.io/currentConfig}`)
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
e5adc6e6-e411-46ce-925a-36ba64fded69
|
GetCurrentImage
|
['Node']
|
github.com/openshift/openshift-tests-private/test/extended/mco/node.go
|
func (n Node) GetCurrentImage() string {
return n.GetOrFail(`{.metadata.annotations.machineconfiguration\.openshift\.io/currentImage}`)
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
646daee4-d7a7-44bf-9535-f045a1ed7e71
|
GetDesiredMachineConfig
|
['Node']
|
github.com/openshift/openshift-tests-private/test/extended/mco/node.go
|
func (n Node) GetDesiredMachineConfig() string {
return n.GetOrFail(`{.metadata.annotations.machineconfiguration\.openshift\.io/desiredConfig}`)
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
0e42b2e3-68a2-4326-8319-44137afce533
|
GetMachineConfigState
|
['Node']
|
github.com/openshift/openshift-tests-private/test/extended/mco/node.go
|
func (n Node) GetMachineConfigState() string {
return n.GetOrFail(`{.metadata.annotations.machineconfiguration\.openshift\.io/state}`)
}
|
mco
|