element_type | project_name | uuid | name | imports | structs | interfaces | file_location | code | global_vars | package | tags |
---|---|---|---|---|---|---|---|---|---|---|---|
function
|
openshift/openshift-tests-private
|
d6556a16-2024-476c-ad30-480d5ec43cfe
|
DeleteMcConfigMap
|
['HypershiftTest']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_hypershift.go
|
func (ht *HypershiftTest) DeleteMcConfigMap() {
exutil.By("delete config map")
cmName := ht.StrValue(TestCtxKeyConfigMap)
NewNamespacedResource(ht.oc.AsAdmin(), "cm", ht.clusterNS, cmName).DeleteOrFail()
logger.Infof("config map %s is deleted successfully", cmName)
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
a9eb506f-545e-4d36-b60f-276c924cadee
|
PatchNodePoolToTriggerUpdate
|
['"fmt"']
|
['HypershiftTest']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_hypershift.go
|
func (ht *HypershiftTest) PatchNodePoolToTriggerUpdate() {
exutil.By("patch node pool to add config setting")
npName := ht.StrValue(HypershiftCrNodePool)
cmName := ht.StrValue(TestCtxKeyConfigMap)
np := NewHypershiftNodePool(ht.oc.AsAdmin(), ht.clusterNS, npName)
o.Expect(np.Patch("merge", fmt.Sprintf(`{"spec":{"config":[{"name": "%s"}]}}`, cmName))).NotTo(o.HaveOccurred(), "patch node pool with cm setting failed")
o.Expect(np.GetOrFail(`{.spec.config}`)).Should(o.ContainSubstring(cmName), "node pool does not have cm config")
logger.Debugf(np.PrettyString())
exutil.By("wait node pool update to complete")
np.WaitUntilConfigIsUpdating()
np.WaitUntilConfigUpdateIsCompleted()
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
98373ecd-be71-4d30-9453-cb231aaeb9a0
|
PatchNodePoolToUpdateReleaseImage
|
['"fmt"', '"strings"']
|
['HypershiftTest']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_hypershift.go
|
func (ht *HypershiftTest) PatchNodePoolToUpdateReleaseImage() {
exutil.By("patch node pool to update release image")
npName := ht.StrValue(HypershiftCrNodePool)
np := NewHypershiftNodePool(ht.oc.AsAdmin(), ht.clusterNS, npName)
versionSlice := strings.Split(np.GetVersion(), ".")
imageURL, version := getLatestImageURL(ht.oc, fmt.Sprintf("%s.%s", versionSlice[0], versionSlice[1])) // get latest nightly build based on release version
o.Expect(np.Patch("merge", fmt.Sprintf(`{"spec":{"release":{"image": "%s"}}}`, imageURL))).NotTo(o.HaveOccurred(), "patch node pool with release image failed")
o.Expect(np.GetOrFail(`{.spec.release.image}`)).Should(o.ContainSubstring(imageURL), "node pool does not have update release image config")
logger.Debugf(np.PrettyString())
exutil.By("wait node pool update to complete")
np.WaitUntilVersionIsUpdating()
np.WaitUntilVersionUpdateIsCompleted()
o.Expect(np.GetVersion()).Should(o.Equal(version), "version of node pool is not updated correctly")
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
f5f719d9-1214-4948-a6b1-b129093a7fc4
|
PatchNodePoolToUpdateMaxUnavailable
|
['"fmt"']
|
['HypershiftTest']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_hypershift.go
|
func (ht *HypershiftTest) PatchNodePoolToUpdateMaxUnavailable(maxUnavailable string) {
exutil.By("patch node pool to update property spec.management.inPlace.maxUnavailable and spec.config")
npName := ht.StrValue(HypershiftCrNodePool)
cmName := ht.StrValue(TestCtxKeyConfigMap)
// update maxUnavailable
np := NewHypershiftNodePool(ht.oc.AsAdmin(), ht.clusterNS, npName)
o.Expect(np.Patch("merge", fmt.Sprintf(`{"spec":{"management":{"inPlace":{"maxUnavailable":%s}}}}`, maxUnavailable))).NotTo(o.HaveOccurred(), "patch node pool with maxUnavailable setting failed")
o.Expect(np.GetOrFail(`{.spec.management.inPlace}`)).Should(o.ContainSubstring("maxUnavailable"), "node pool does not have maxUnavailable config")
// update config
o.Expect(np.Patch("merge", fmt.Sprintf(`{"spec":{"config":[{"name": "%s"}]}}`, cmName))).NotTo(o.HaveOccurred(), "patch node pool with cm setting failed")
o.Expect(np.GetOrFail(`{.spec.config}`)).Should(o.ContainSubstring(cmName), "node pool does not have cm config")
logger.Debugf(np.PrettyString())
exutil.By("check node pool update is started")
np.WaitUntilConfigIsUpdating()
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
331f933c-099f-4810-a8b5-900375c41631
|
CheckNodesAreUpdatingInParallel
|
['"fmt"', '"strings"']
|
['HypershiftTest']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_hypershift.go
|
func (ht *HypershiftTest) CheckNodesAreUpdatingInParallel(nodeNum int) {
npName := ht.StrValue(HypershiftCrNodePool)
np := NewHypershiftNodePool(ht.oc.AsAdmin(), ht.clusterNS, npName)
defer np.WaitUntilConfigUpdateIsCompleted()
exutil.By(fmt.Sprintf("checking whether nodes are updating in parallel, expected node num is %v", nodeNum))
kubeconf := ht.StrValue(TestCtxKeyKubeConfig)
ht.oc.SetGuestKubeconf(kubeconf)
nodesInfo := ""
mcStatePoller := func() int {
nodeStates := NewNodeList(ht.oc.AsAdmin().AsGuestKubeconf()).McStateSnapshot()
logger.Infof("machine-config state of all hosted cluster nodes: %s", nodeStates)
nodesInfo, _ = ht.oc.AsAdmin().AsGuestKubeconf().Run("get").Args("node").Output()
return strings.Count(nodeStates, "Working")
}
o.Eventually(mcStatePoller, "3m", "10s").Should(o.BeNumerically("==", nodeNum), "updating node num not equal to maxUnavailable value.\n%s", nodesInfo)
o.Consistently(mcStatePoller, "8m", "10s").Should(o.BeNumerically("<=", nodeNum), "updating node num is greater than maxUnavailable value.\n%s", nodesInfo)
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
248478c5-d31b-482a-9703-26666844ba2a
|
CreateKubeConfigForCluster
|
['"fmt"', '"path/filepath"']
|
['HypershiftTest']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_hypershift.go
|
func (ht *HypershiftTest) CreateKubeConfigForCluster() {
exutil.By("create kubeconfig for hosted cluster")
clusterName := ht.StrValue(TestCtxKeyCluster)
file := filepath.Join(ht.dir, fmt.Sprintf("%s-kubeconfig", clusterName))
_, err := ht.cli.CreateKubeConfig(clusterName, ht.clusterNS, file)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("create kubeconfig for cluster %s failed", clusterName))
logger.Infof("kubeconfig of cluster %s is saved to %s", clusterName, file)
ht.Put(TestCtxKeyKubeConfig, file)
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
3675e872-a266-4399-801e-6fa473e570ca
|
CheckMcAnnotationsOnNode
|
['"fmt"', '"strings"']
|
['HypershiftTest']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_hypershift.go
|
func (ht *HypershiftTest) CheckMcAnnotationsOnNode() {
exutil.By("check machine config annotation to verify update is done")
clusterName := ht.StrValue(TestCtxKeyCluster)
kubeconf := ht.StrValue(TestCtxKeyKubeConfig)
npName := ht.StrValue(HypershiftCrNodePool)
ht.oc.SetGuestKubeconf(kubeconf)
np := NewHypershiftNodePool(ht.oc.AsAdmin().AsGuestKubeconf(), ht.clusterNS, npName)
workerNode := np.GetAllLinuxNodesOrFail()[0]
// get machine config name
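// list the Opaque secrets in the hosted control plane namespace, sorted by creation timestamp, and keep only the newest one (items filter "-1:")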
secrets := NewNamespacedResourceList(ht.oc.AsAdmin(), "secrets", fmt.Sprintf("%s-%s", ht.clusterNS, clusterName))
secrets.SortByTimestamp()
secrets.ByFieldSelector("type=Opaque")
secrets.SetItemsFilter("-1:")
filterdSecrets, getSecretErr := secrets.GetAll()
o.Expect(getSecretErr).NotTo(o.HaveOccurred(), "Get latest secret failed")
userDataSecretName := filterdSecrets[0].GetName()
logger.Infof("get latest user-data secret name %s", userDataSecretName)
// mc name is suffix of the secret name e.g. user-data-inplace-upgrade-fe5d465e
tempSlice := strings.Split(userDataSecretName, "-")
mcName := tempSlice[len(tempSlice)-1]
logger.Infof("machine config name is %s", mcName)
logger.Debugf(workerNode.PrettyString())
desiredConfig := workerNode.GetAnnotationOrFail(NodeAnnotationDesiredConfig)
currentConfig := workerNode.GetAnnotationOrFail(NodeAnnotationCurrentConfig)
desiredDrain := workerNode.GetAnnotationOrFail(NodeAnnotationDesiredDrain)
lastAppliedDrain := workerNode.GetAnnotationOrFail(NodeAnnotationLastAppliedDrain)
reason := workerNode.GetAnnotationOrFail(NodeAnnotationReason)
state := workerNode.GetAnnotationOrFail(NodeAnnotationState)
drainReqID := fmt.Sprintf("uncordon-%s", mcName)
// do assertion for annotations, expected result is like below
// desiredConfig == currentConfig
o.Expect(currentConfig).Should(o.Equal(desiredConfig), "current config not equal to desired config")
// desiredConfig = $mcName
o.Expect(desiredConfig).Should(o.Equal(mcName))
// currentConfig = $mcName
o.Expect(currentConfig).Should(o.Equal(mcName))
// desiredDrain == lastAppliedDrain
o.Expect(desiredDrain).Should(o.Equal(lastAppliedDrain), "desired drain not equal to last applied drain")
// desiredDrain = uncordon-$mcName
o.Expect(desiredDrain).Should(o.Equal(drainReqID), "desired drain id is not expected")
// lastAppliedDrain = uncordon-$mcName
o.Expect(lastAppliedDrain).Should(o.Equal(drainReqID), "last applied drain id is not expected")
// reason is empty
o.Expect(reason).Should(o.BeEmpty(), "reason is not empty")
// state is 'Done'
o.Expect(state).Should(o.Equal("Done"))
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
dcd0f30a-7756-4a65-97cf-3d2bbd8297e5
|
VerifyFileContent
|
['HypershiftTest']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_hypershift.go
|
func (ht *HypershiftTest) VerifyFileContent() {
exutil.By("check whether the test file content is matched ")
filePath := ht.StrValue(TestCtxKeyFilePath)
kubeconf := ht.StrValue(TestCtxKeyKubeConfig)
npName := ht.StrValue(HypershiftCrNodePool)
ht.oc.SetGuestKubeconf(kubeconf)
np := NewHypershiftNodePool(ht.oc.AsAdmin().AsGuestKubeconf(), ht.clusterNS, npName)
workerNode := np.GetAllLinuxNodesOrFail()[0]
// when we call oc debug with the guest kubeconfig, the temp namespace oc.Namespace()
// cannot be found in the hosted cluster.
// clone the node object and change its namespace to default
clonedNode := workerNode
clonedNode.oc.SetNamespace("default")
rf := NewRemoteFile(clonedNode, filePath)
o.Expect(rf.Fetch()).NotTo(o.HaveOccurred(), "fetch remote file failed")
o.Expect(rf.GetTextContent()).Should(o.ContainSubstring("hello world"), "file content does not match machine config setting")
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
81aae17f-f3bd-4d8f-8d81-2e571f249368
|
skipTestIfLatestAcceptedBuildIsSameAsHostedClusterVersion
|
['"fmt"', '"strings"']
|
['HypershiftTest']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_hypershift.go
|
func (ht *HypershiftTest) skipTestIfLatestAcceptedBuildIsSameAsHostedClusterVersion() {
// OCPQE-17034: if the latest accepted build is the same as the hosted cluster version (i.e. the nodepool version as well),
// the release image update will not happen, so skip this case.
// Get hosted cluster version
hostedclusterName := ht.StrValue(TestCtxKeyCluster)
hostedcluster := NewHypershiftHostedCluster(ht.oc.AsAdmin(), ht.clusterNS, hostedclusterName)
hostedclusterVersion := hostedcluster.GetVersion()
// Get latest accepted build
_, latestAcceptedBuild := getLatestImageURL(ht.oc, strings.Join(strings.Split(hostedclusterVersion, ".")[:2], "."))
if hostedclusterVersion == latestAcceptedBuild {
g.Skip(fmt.Sprintf("latest accepted build [%s] is same as hosted cluster version [%s], cannot update release image, skip this case", latestAcceptedBuild, hostedclusterVersion))
}
}
|
mco
| |||
function
|
openshift/openshift-tests-private
|
14e2a013-b5d8-4253-9675-77e0297b88db
|
getHypershiftImage
|
['"fmt"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"']
|
['HypershiftTest']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_hypershift.go
|
func (ht *HypershiftTest) getHypershiftImage() string {
// get minor release version as image tag
// imageTag, _, cvErr := exutil.GetClusterVersion(ht.oc)
// o.Expect(cvErr).NotTo(o.HaveOccurred(), "Get minor release version error")
// Because of https://issues.redhat.com/browse/OCPQE-26256 we will always use the "latest" image
imageTag := "latest"
repo := "quay.io/hypershift/hypershift-operator"
arch := architecture.ClusterArchitecture(ht.oc)
if arch == architecture.ARM64 || arch == architecture.MULTI {
repo = "quay.io/acm-d/rhtap-hypershift-operator"
}
image := fmt.Sprintf("%s:%s", repo, imageTag)
logger.Infof("Hypershift image is: %s", image)
return image
}
|
mco
| |||
test case
|
openshift/openshift-tests-private
|
c836055a-f32c-451f-99a3-f58881c81671
|
Author:rioliu-Longduration-HyperShiftMGMT-NonPreRelease-High-54328-hypershift Add new file on hosted cluster node via config map [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_hypershift.go
|
g.It("Author:rioliu-Longduration-HyperShiftMGMT-NonPreRelease-High-54328-hypershift Add new file on hosted cluster node via config map [Disruptive]", func() {
// create node pool with replica=2
// destroy node pool then delete config map
defer ht.DeleteMcConfigMap()
defer ht.DestroyNodePoolOnAws()
ht.CreateNodePoolOnAws("2")
// create config map which contains machine config
ht.CreateMcConfigMap()
// patch node pool to update config name with new config map
ht.PatchNodePoolToTriggerUpdate()
// create kubeconfig for hosted cluster
ht.CreateKubeConfigForCluster()
// check machine config annotations on nodes to make sure update is done
ht.CheckMcAnnotationsOnNode()
// check file content on hosted cluster nodes
ht.VerifyFileContent()
})
| ||||||
test case
|
openshift/openshift-tests-private
|
0a046b59-27c4-45a6-8266-f350f3cc9260
|
Author:rioliu-Longduration-HyperShiftMGMT-NonPreRelease-High-54366-hypershift Update release image of node pool [Disruptive]
|
['"github.com/openshift/openshift-tests-private/test/extended/util/architecture"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_hypershift.go
|
g.It("Author:rioliu-Longduration-HyperShiftMGMT-NonPreRelease-High-54366-hypershift Update release image of node pool [Disruptive]", func() {
// check arch, only support amd64
architecture.SkipNonAmd64SingleArch(oc)
// check latest accepted build, if it is same as hostedcluster version, skip this case
ht.skipTestIfLatestAcceptedBuildIsSameAsHostedClusterVersion()
// create a nodepool with 2 replicas and enable in place upgrade
defer ht.DestroyNodePoolOnAws()
ht.CreateNodePoolOnAws("2")
// patch nodepool with latest nightly build and wait until version update to complete
// compare nodepool version and build version. they should be same
ht.PatchNodePoolToUpdateReleaseImage()
// create kubeconfig for hosted cluster
ht.CreateKubeConfigForCluster()
// check machine config annotations on nodes to make sure update is done
ht.CheckMcAnnotationsOnNode()
})
| |||||
test case
|
openshift/openshift-tests-private
|
f5e4a4e3-a49d-436c-99a0-cb5596cefce9
|
Author:rioliu-Longduration-HyperShiftMGMT-NonPreRelease-High-55356-[P1] hypershift Honor MaxUnavailable for inplace upgrades [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_hypershift.go
|
g.It("Author:rioliu-Longduration-HyperShiftMGMT-NonPreRelease-High-55356-[P1] hypershift Honor MaxUnavailable for inplace upgrades [Disruptive]", func() {
// create node pool with replica=3
// destroy node pool then delete config map
defer ht.DeleteMcConfigMap()
defer ht.DestroyNodePoolOnAws()
ht.CreateNodePoolOnAws("3") // TODO: change the replica to 5 when bug https://issues.redhat.com/browse/OCPBUGS-2870 is fixed
// create config map which contains machine config
ht.CreateMcConfigMap()
// patch node pool to update config name with new config map
ht.PatchNodePoolToUpdateMaxUnavailable("2")
// create kubeconfig for hosted cluster
ht.CreateKubeConfigForCluster()
// check whether nodes are updating in parallel
ht.CheckNodesAreUpdatingInParallel(2)
})
| ||||||
test
|
openshift/openshift-tests-private
|
7755a32b-4dce-4435-a7b3-be0ab9334fa7
|
mco_layering
|
import (
"fmt"
"os"
"path/filepath"
"regexp"
"github.com/openshift/openshift-tests-private/test/extended/util/architecture"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
)
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_layering.go
|
package mco
import (
"fmt"
"os"
"path/filepath"
"regexp"
"github.com/openshift/openshift-tests-private/test/extended/util/architecture"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
)
var _ = g.Describe("[sig-mco] MCO Layering", func() {
defer g.GinkgoRecover()
var (
// init cli object, temp namespace contains prefix mco.
// tip: don't put this in BeforeEach/JustBeforeEach, or you will get the error
// "You may only call AfterEach from within a Describe, Context or When"
oc = exutil.NewCLI("mco-layering", exutil.KubeConfigPath())
// temp dir to store all test files, and it will be recycled when test is finished
tmpdir string
)
g.JustBeforeEach(func() {
tmpdir = createTmpDir()
preChecks(oc)
})
g.JustAfterEach(func() {
os.RemoveAll(tmpdir)
logger.Infof("test dir %s is cleaned up", tmpdir)
})
g.It("Author:sregidor-ConnectedOnly-Longduration-NonPreRelease-Critical-54085-[P1] Update osImage changing /etc /usr and rpm [Disruptive]", func() {
architecture.SkipArchitectures(oc, architecture.MULTI, architecture.S390X, architecture.PPC64LE)
// Because of https proxies using their own user-ca certificate, we need to take into account the openshift-config-user-ca-bundle.crt file
coreOSMcp := GetCoreOsCompatiblePool(oc.AsAdmin())
node := coreOSMcp.GetCoreOsNodesOrFail()[0]
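// Dockerfile commands layered on top of the base osImage: create a test directory and file under /etc,
// a "hello world" script under /usr/bin, trust the cluster's user CA bundle and install the tailscale rpm from its yum repo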
dockerFileCommands := `
RUN mkdir /etc/tc_54085 && chmod 3770 /etc/tc_54085 && ostree container commit
RUN echo 'Test case 54085 test file' > /etc/tc54085.txt && chmod 5400 /etc/tc54085.txt && ostree container commit
RUN echo 'echo "Hello world"' > /usr/bin/tc54085_helloworld && chmod 5770 /usr/bin/tc54085_helloworld && ostree container commit
COPY openshift-config-user-ca-bundle.crt /etc/pki/ca-trust/source/anchors/openshift-config-user-ca-bundle.crt
RUN update-ca-trust && \
rm /etc/pki/ca-trust/source/anchors/openshift-config-user-ca-bundle.crt && \
cd /etc/yum.repos.d/ && curl -LO https://pkgs.tailscale.com/stable/fedora/tailscale.repo && \
rpm-ostree install tailscale && rpm-ostree cleanup -m && \
systemctl enable tailscaled && \
ostree container commit
`
// Capture current rpm-ostree status
exutil.By("Capture the current ostree deployment")
initialBootedImage, err := node.GetCurrentBootOSImage()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the initial booted image")
logger.Infof("OK\n")
// Build the new osImage
osImageBuilder := OsImageBuilderInNode{node: node, dockerFileCommands: dockerFileCommands}
digestedImage, err := osImageBuilder.CreateAndDigestOsImage()
o.Expect(err).NotTo(o.HaveOccurred(),
"Error creating the new osImage")
logger.Infof("OK\n")
// Create MC and wait for MCP
exutil.By("Create a MC to deploy the new osImage")
layeringMcName := "layering-mc"
layeringMC := NewMachineConfig(oc.AsAdmin(), layeringMcName, coreOSMcp.GetName())
layeringMC.parameters = []string{"OS_IMAGE=" + digestedImage}
layeringMC.skipWaitForMcp = true
defer layeringMC.delete()
layeringMC.create()
coreOSMcp.waitForComplete()
logger.Infof("The new osImage was deployed successfully\n")
// Check rpm-ostree status
exutil.By("Check that the rpm-ostree status is reporting the right booted image")
o.Expect(node.GetCurrentBootOSImage()).To(o.Equal(digestedImage),
"The booted image resported by rpm-ostree status is not the expected one")
logger.Infof("OK!\n")
// Check image content
exutil.By("Load remote resources to verify that the osImage content has been deployed properly")
tc54085Dir := NewRemoteFile(node, "/etc/tc_54085")
tc54085File := NewRemoteFile(node, "/etc/tc54085.txt")
binHelloWorld := NewRemoteFile(node, "/usr/bin/tc54085_helloworld")
o.Expect(tc54085Dir.Fetch()).ShouldNot(o.HaveOccurred(),
"Error getting information about file %s in node %s", tc54085Dir.GetFullPath(), node.GetName())
o.Expect(tc54085File.Fetch()).ShouldNot(o.HaveOccurred(),
"Error getting information about file %s in node %s", tc54085File.GetFullPath(), node.GetName())
o.Expect(binHelloWorld.Fetch()).ShouldNot(o.HaveOccurred(),
"Error getting information about file %s in node %s", binHelloWorld.GetFullPath(), node.GetName())
logger.Infof("OK!\n")
exutil.By("Check that the directory in /etc exists and has the right permissions")
o.Expect(tc54085Dir.IsDirectory()).To(o.BeTrue(),
"Error, %s in node %s is not a directory", tc54085Dir.GetFullPath(), node.GetName())
o.Expect(tc54085Dir.GetNpermissions()).To(o.Equal("3770"),
"Error, permissions of %s in node %s are not the expected ones", tc54085Dir.GetFullPath(), node.GetName())
logger.Infof("OK!\n")
exutil.By("Check that the file in /etc exists and has the right permissions")
o.Expect(tc54085File.GetNpermissions()).To(o.Equal("5400"),
"Error, permissions of %s in node %s are not the expected ones", tc54085File.GetFullPath(), node.GetName())
o.Expect(tc54085File.GetTextContent()).To(o.Equal("Test case 54085 test file\n"),
"Error, content of %s in node %s are not the expected one", tc54085File.GetFullPath(), node.GetName())
exutil.By("Check that the file in /usr/bin exists, has the right permissions and can be executed")
o.Expect(binHelloWorld.GetNpermissions()).To(o.Equal("5770"),
"Error, permissions of %s in node %s are not the expected ones", tc54085File.GetFullPath(), node.GetName())
output, herr := node.DebugNodeWithChroot("/usr/bin/tc54085_helloworld")
o.Expect(herr).NotTo(o.HaveOccurred(),
"Error executing 'hello world' executable file /usr/bin/tc54085_helloworld")
o.Expect(output).To(o.ContainSubstring("Hello world"),
"Error, 'Hellow world' executable file's output was not the expected one")
logger.Infof("OK!\n")
exutil.By("Check that the tailscale rpm has been deployed")
tailscaledRpm, rpmErr := node.DebugNodeWithChroot("rpm", "-q", "tailscale")
o.Expect(rpmErr).NotTo(o.HaveOccurred(),
"Error, getting the installed rpms in node %s. 'tailscale' rpm is not installed.", node.GetName())
o.Expect(tailscaledRpm).To(o.ContainSubstring("tailscale-"),
"Error, 'tailscale' rpm is not installed in node %s", node.GetName())
logger.Infof("OK!\n")
exutil.By("Check that the tailscaled.service unit is loaded, active and enabled")
tailscaledStatus, unitErr := node.GetUnitStatus("tailscaled.service")
o.Expect(unitErr).NotTo(o.HaveOccurred(),
"Error getting the status of the 'tailscaled.service' unit in node %s", node.GetName())
o.Expect(tailscaledStatus).Should(
o.And(
o.ContainSubstring("tailscaled.service"),
o.ContainSubstring("Active: active"), // is active
o.ContainSubstring("Loaded: loaded"), // is loaded
o.ContainSubstring("; enabled;")), // is enabled
"tailscaled.service unit should be loaded, active and enabled and it is not")
logger.Infof("OK!\n")
// Delete the MC and wait for MCP
exutil.By("Delete the MC so that the original osImage is restored")
layeringMC.delete()
coreOSMcp.waitForComplete()
logger.Infof("MC was successfully deleted\n")
// Check the rpm-ostree status after the MC deletion
exutil.By("Check that the original ostree deployment was restored")
o.Expect(node.GetCurrentBootOSImage()).To(o.Equal(initialBootedImage),
"Error! the initial osImage was not properly restored after deleting the MachineConfig")
logger.Infof("OK!\n")
// Check the image content after the MC deletion
exutil.By("Check that the directory in /etc does not exist anymore")
o.Expect(tc54085Dir.Fetch()).Should(o.HaveOccurred(),
"Error, file %s should not exist in node %s, but it exists", tc54085Dir.GetFullPath(), node.GetName())
logger.Infof("OK!\n")
exutil.By("Check that the file in /etc does not exist anymore")
o.Expect(tc54085File.Fetch()).Should(o.HaveOccurred(),
"Error, file %s should not exist in node %s, but it exists", tc54085File.GetFullPath(), node.GetName())
logger.Infof("OK!\n")
exutil.By("Check that the file in /usr/bin does not exist anymore")
o.Expect(binHelloWorld.Fetch()).Should(o.HaveOccurred(),
"Error, file %s should not exist in node %s, but it exists", binHelloWorld.GetFullPath(), node.GetName())
logger.Infof("OK!\n")
exutil.By("Check that the tailscale rpm is not installed anymore")
tailscaledRpm, rpmErr = node.DebugNodeWithChroot("rpm", "-q", "tailscale")
o.Expect(rpmErr).To(o.HaveOccurred(),
"Error, 'tailscale' rpm should not be installed in node %s, but it is installed.\n Output %s", node.GetName(), tailscaledRpm)
logger.Infof("OK!\n")
exutil.By("Check that the tailscaled.service is not present anymore")
tailscaledStatus, unitErr = node.GetUnitStatus("tailscaled.service")
o.Expect(unitErr).To(o.HaveOccurred(),
"Error, 'tailscaled.service' unit should not be available in node %s, but it is.\n Output %s", node.GetName(), tailscaledStatus)
logger.Infof("OK!\n")
})
g.It("Author:sregidor-ConnectedOnly-NonPreRelease-Longduration-Medium-54052-[P2] Not bootable layered osImage provided[Disruptive]", func() {
var (
nonBootableImage = "quay.io/openshifttest/hello-openshift:1.2.0"
layeringMcName = "not-bootable-image-tc54052"
expectedNDMessage = ".*failed to update OS to " + regexp.QuoteMeta(nonBootableImage) + ".*error running rpm-ostree rebase.*ostree.bootable.*"
expectedNDReason = "1 nodes are reporting degraded status on sync"
)
checkInvalidOsImagesDegradedStatus(oc.AsAdmin(), nonBootableImage, layeringMcName, expectedNDMessage, expectedNDReason)
})
g.It("Author:sregidor-NonPreRelease-Longduration-Medium-54054-Not pullable layered osImage provided[Disruptive]", func() {
var (
nonPullableImage = "quay.io/openshifttest/tc54054fakeimage:latest"
layeringMcName = "not-pullable-image-tc54054"
expectedNDMessage = ".*" + regexp.QuoteMeta(nonPullableImage) + ".*error"
expectedNDReason = "1 nodes are reporting degraded status on sync"
)
checkInvalidOsImagesDegradedStatus(oc.AsAdmin(), nonPullableImage, layeringMcName, expectedNDMessage, expectedNDReason)
})
g.It("Author:sregidor-ConnectedOnly-Longduration-NonPreRelease-Critical-54159-[P1] Apply a new osImage on a cluster with already installed rpms [Disruptive]", func() {
var (
rpmName = "wget"
yumRepoTemplate = generateTemplateAbsolutePath("centos.repo")
yumRepoFile = "/etc/yum.repos.d/tc-54159-centos.repo"
proxy = NewResource(oc.AsAdmin(), "proxy", "cluster")
coreOSMcp = GetCoreOsCompatiblePool(oc.AsAdmin())
node = coreOSMcp.GetCoreOsNodesOrFail()[0]
)
architecture.SkipArchitectures(oc, architecture.MULTI, architecture.S390X, architecture.PPC64LE)
dockerFileCommands := `
RUN echo "echo 'Hello world! '$(whoami)" > /usr/bin/tc_54159_rpm_and_osimage && chmod 1755 /usr/bin/tc_54159_rpm_and_osimage
`
// Build the new osImage
osImageBuilder := OsImageBuilderInNode{node: node, dockerFileCommands: dockerFileCommands}
digestedImage, berr := osImageBuilder.CreateAndDigestOsImage()
o.Expect(berr).NotTo(o.HaveOccurred(),
"Error creating the new osImage")
logger.Infof("OK\n")
// Install rpm in first node
exutil.By("Installing rpm package in first working node")
logger.Infof("Copy yum repo to node")
o.Expect(node.CopyFromLocal(yumRepoTemplate, yumRepoFile)).
NotTo(o.HaveOccurred(),
"Error copying %s to %s in node %s", yumRepoTemplate, yumRepoFile, node.GetName())
// rpm-ostree only uses the proxy from the yum.repos.d configuration, it ignores the env vars.
logger.Infof("Configure proxy in yum")
_, err := node.DebugNodeWithChroot("sed", "-i", "s#proxy=#proxy="+proxy.GetOrFail(`{.status.httpProxy}`)+"#g", yumRepoFile)
o.Expect(err).NotTo(o.HaveOccurred(), "Error configuring the proxy in the centos yum repo config")
defer func() {
logger.Infof("Start defer logic to uninstall the %s rpm", rpmName)
waitErr := node.WaitUntilRpmOsTreeIsIdle()
if waitErr != nil {
node.CancelRpmOsTreeTransactions()
}
node.UninstallRpm(rpmName)
node.DebugNodeWithChroot("rm", yumRepoFile)
node.Reboot()
coreOSMcp.waitForComplete()
// Because of a bug in SNO after a reboot the controller cannot get the lease properly
// We wait until the controller gets the lease. We make sure that the next test will receive a fully clean environment with the controller ready
o.Eventually(NewController(oc.AsAdmin()).HasAcquiredLease, "10m", "20s").Should(o.BeTrue(),
"Controller never acquired lease after the nodes was rebooted")
// Printing the status, apart from tracing the exact status of rpm-ostree,
// is a way of waiting for the node to be ready after the reboot, so that the next test case
// can be executed without problems, because the status cannot be retrieved until the node is ready.
status, _ := node.GetRpmOstreeStatus(false)
logger.Infof(status)
}()
// We wait, but we don't fail; if rpm-ostree does not become idle we cancel the current transactions and continue with the installation
waitErr := node.WaitUntilRpmOsTreeIsIdle()
if waitErr != nil {
logger.Infof("rpm-ostree state is NOT IDLE. We cancel the current transactions to continue the test!!!")
cOut, err := node.CancelRpmOsTreeTransactions()
o.Expect(err).
NotTo(o.HaveOccurred(),
"Error cancelling transactions in node %s.\n%s", node.GetName(), cOut)
}
instOut, err := node.InstallRpm(rpmName)
logger.Debugf("Install rpm output: %s", instOut)
o.Expect(err).
NotTo(o.HaveOccurred(),
"Error installing '%s' rpm in node %s", rpmName, node.GetName())
o.Expect(node.Reboot()).To(o.Succeed(),
"Error rebooting node %s", node.GetName())
// In SNO clusters when we reboot the only node the connectivity is broken.
// Because the exutils.debugNode function fails the test if any problem happens
// we need to wait until the pool is stable (waitForComplete) before trying to debug the node again, even if we do it inside an Eventually instruction
coreOSMcp.waitForComplete()
logger.Infof("Check that the wget binary is available")
o.Eventually(func() error {
_, err := node.DebugNodeWithChroot("which", "wget")
return err
}, "15m", "20s").Should(o.Succeed(),
"Error. wget binay is not available after installing '%s' rpm in node %s.", rpmName, node.GetName())
logger.Infof("OK\n")
// Capture current rpm-ostree status
exutil.By("Capture the current ostree deployment")
o.Eventually(node.IsRpmOsTreeIdle, "10m", "20s").
Should(o.BeTrue(), "rpm-ostree status didn't become idle after installing wget")
initialDeployment, derr := node.GetBootedOsTreeDeployment(false)
o.Expect(derr).NotTo(o.HaveOccurred(),
"Error getting the rpm-ostree status value in node %s", node.GetName())
logger.Infof("Current status with date:\n %s", initialDeployment)
o.Expect(initialDeployment).
To(o.MatchRegexp("LayeredPackages: .*%s", rpmName),
"rpm-ostree is not reporting the installed '%s' package in the rpm-ostree status command", rpmName)
initialBootedImage, err := node.GetCurrentBootOSImage()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the initial booted image")
logger.Infof("Initial booted osImage: %s", initialBootedImage)
logger.Infof("OK\n")
// Create MC and wait for MCP
exutil.By("Create a MC to deploy the new osImage")
layeringMcName := "layering-mc-54159"
layeringMC := NewMachineConfig(oc.AsAdmin(), layeringMcName, coreOSMcp.GetName())
layeringMC.parameters = []string{"OS_IMAGE=" + digestedImage}
layeringMC.skipWaitForMcp = true
defer layeringMC.delete()
layeringMC.create()
// Because of a bug in SNO after a reboot the controller cannot get the lease properly
// We wait until the controller gets the lease
o.Eventually(NewController(oc.AsAdmin()).HasAcquiredLease, "10m", "20s").Should(o.BeTrue(),
"Controller never acquired lease after the nodes was rebooted")
coreOSMcp.waitForComplete()
logger.Infof("The new osImage was deployed successfully\n")
// Check rpm-ostree status
exutil.By("Check that the rpm-ostree status is reporting the right booted image and installed rpm")
bootedDeployment, err := node.GetBootedOsTreeDeployment(false)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the rpm-ostree status value in node %s", node.GetName())
logger.Infof("Current rpm-ostree booted status:\n%s\n", bootedDeployment)
o.Expect(bootedDeployment).
To(o.MatchRegexp("LayeredPackages: .*%s", rpmName),
"rpm-ostree is not reporting the installed 'wget' package in the rpm-ostree status command")
o.Expect(node.GetCurrentBootOSImage()).To(o.Equal(digestedImage),
"container reference in the status is not reporting the right booted image")
logger.Infof("OK!\n")
// Check rpm is installed
exutil.By("Check that the rpm is installed even if we use the new osImage")
rpmOut, err := node.DebugNodeWithChroot("rpm", "-q", "wget")
o.Expect(err).
NotTo(o.HaveOccurred(),
"Error. %s rpm is not installed after changing the osImage in node %s.\n%s", rpmName, node.GetName(), rpmOut)
wOut, err := node.DebugNodeWithChroot("which", "wget")
o.Expect(err).
NotTo(o.HaveOccurred(),
"Error. wget binay is not available after installing '%s' rpm in node %s.\n%s", rpmName, node.GetName(), wOut)
logger.Infof("OK\n")
// Check osImage content
exutil.By("Check that the new osImage content was deployed properly")
rf := NewRemoteFile(node, "/usr/bin/tc_54159_rpm_and_osimage")
o.Expect(rf.Fetch()).
ShouldNot(o.HaveOccurred(),
"Error getting information about file %s in node %s", rf.GetFullPath(), node.GetName())
o.Expect(rf.GetNpermissions()).To(o.Equal("1755"),
"Error, permissions of %s in node %s are not the expected ones", rf.GetFullPath(), node.GetName())
o.Expect(rf.GetTextContent()).To(o.ContainSubstring("Hello world"),
"Error, content of %s in node %s is not the expected ones", rf.GetFullPath(), node.GetName())
logger.Infof("OK\n")
// Delete the MC and wait for MCP
exutil.By("Delete the MC so that original osImage is restored")
layeringMC.delete()
logger.Infof("MC was successfully deleted\n")
// Check the rpm-ostree status after the MC deletion
exutil.By("Check that the original ostree deployment was restored")
logger.Infof("Waiting for rpm-ostree status to be idle")
o.Eventually(node.IsRpmOsTreeIdle, "10m", "20s").
Should(o.BeTrue(), "rpm-ostree status didn't become idle after installing wget")
logger.Infof("Checking original status")
deployment, derr := node.GetBootedOsTreeDeployment(false) // for debugging
o.Expect(derr).NotTo(o.HaveOccurred(),
"Error getting the rpm-ostree status value in node %s", node.GetName())
logger.Infof("Current status with date:\n %s", deployment)
o.Expect(node.GetCurrentBootOSImage()).To(o.Equal(initialBootedImage),
"Error! the initial osImage was not properly restored after deleting the MachineConfig")
logger.Infof("OK!\n")
})
g.It("Author:sregidor-NonPreRelease-Medium-54049-[P2] Verify base images in the release image", func() {
var (
oldMachineConfigOsImage = "machine-os-content"
coreExtensions = "rhel-coreos-extensions"
)
exutil.By("Extract pull-secret")
pullSecret := GetPullSecret(oc.AsAdmin())
// TODO: when the code to create a tmp directory in the beforeEach section is merged, use ExtractToDir method instead
secretExtractDir, err := pullSecret.Extract()
o.Expect(err).NotTo(o.HaveOccurred(),
"Error extracting pull-secret")
logger.Infof("Pull secret has been extracted to: %s\n", secretExtractDir)
dockerConfigFile := filepath.Join(secretExtractDir, ".dockerconfigjson")
exutil.By("Get base image for layering")
baseImage, err := getImageFromReleaseInfo(oc.AsAdmin(), LayeringBaseImageReleaseInfo, dockerConfigFile)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the base image to build new osImages")
logger.Infof("Base image: %s\n", baseImage)
exutil.By("Inspect base image information")
skopeoCLI := NewSkopeoCLI().SetAuthFile(dockerConfigFile)
inspectInfo, err := skopeoCLI.Run("inspect").Args("--tls-verify=false", "--config", "docker://"+baseImage).Output()
o.Expect(err).NotTo(o.HaveOccurred(),
"Error using 'skopeo' to inspect base image %s", baseImage)
logger.Infof("Check if image is bootable")
inspectJSON := JSON(inspectInfo)
ostreeBootable := inspectJSON.Get("config").Get("Labels").Get("ostree.bootable").ToString()
o.Expect(ostreeBootable).To(o.Equal("true"),
`The base image %s is expected to be bootable (.config.Labels.ostree\.bootable == "true"), but skopeo information says that it is not bootable. %s`,
baseImage, inspectInfo)
logger.Infof("OK!\n")
exutil.By("Verify that old machine config os content is not present in the release info")
mcOsIMage, _ := getImageFromReleaseInfo(oc.AsAdmin(), oldMachineConfigOsImage, dockerConfigFile)
o.Expect(mcOsIMage).To(o.ContainSubstring(`no image tag "`+oldMachineConfigOsImage+`" exists`),
"%s image should not be present in the release image, but we can find it with value %s", oldMachineConfigOsImage, mcOsIMage)
logger.Infof("OK!\n")
exutil.By("Verify that new core extensions image is present in the release info")
coreExtensionsValue, exErr := getImageFromReleaseInfo(oc.AsAdmin(), coreExtensions, dockerConfigFile)
o.Expect(exErr).NotTo(o.HaveOccurred(),
"Error getting the new core extensions image")
o.Expect(coreExtensionsValue).NotTo(o.BeEmpty(),
"%s image should be present in the release image, but we cannot find it with value %s", coreExtensions)
logger.Infof("%s is present in the release infor with value %s", coreExtensions, coreExtensionsValue)
logger.Infof("OK!\n")
})
g.It("Author:sregidor-ConnectedOnly-Longduration-NonPreRelease-High-54909-[P1] Configure extensions while using a custom osImage [Disruptive]", func() {
// Due to https://issues.redhat.com/browse/OCPBUGS-31255 in this test case pools will be degraded intermittently. They will be degraded and automatically fixed in a few minutes/seconds
// Because of that we need to use WaitForUpdatedStatus instead of waitForComplete, since WaitForUpdatedStatus will not fail if a pool is degraded for just a few minutes but the configuration is applied properly
architecture.SkipArchitectures(oc, architecture.MULTI, architecture.S390X, architecture.PPC64LE)
var (
rpmName = "zsh"
extensionRpmName = "usbguard"
dockerFileCommands = fmt.Sprintf(`
COPY openshift-config-user-ca-bundle.crt /etc/pki/ca-trust/source/anchors/openshift-config-user-ca-bundle.crt
RUN update-ca-trust && \
rm /etc/pki/ca-trust/source/anchors/openshift-config-user-ca-bundle.crt
RUN printf '[baseos]\nname=CentOS-$releasever - Base\nbaseurl=http://mirror.stream.centos.org/$releasever-stream/BaseOS/$basearch/os/\ngpgcheck=0\nenabled=1\nproxy='$HTTPS_PROXY'\n\n[appstream]\nname=CentOS-$releasever - AppStream\nbaseurl=http://mirror.stream.centos.org/$releasever-stream/AppStream/$basearch/os/\ngpgcheck=0\nenabled=1\nproxy='$HTTPS_PROXY'\n\n' > /etc/yum.repos.d/centos.repo && \
rpm-ostree install %s && \
rpm-ostree cleanup -m && \
ostree container commit
`, rpmName)
workerNode = NewNodeList(oc).GetAllCoreOsWokerNodesOrFail()[0]
masterNode = NewNodeList(oc).GetAllMasterNodesOrFail()[0]
wMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
mMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)
)
mMcp.SetWaitingTimeForExtensionsChange()
wMcp.SetWaitingTimeForExtensionsChange()
defer mMcp.WaitForUpdatedStatus()
defer wMcp.WaitForUpdatedStatus()
// Build the new osImage
osImageBuilder := OsImageBuilderInNode{node: workerNode, dockerFileCommands: dockerFileCommands}
defer func() { _ = osImageBuilder.CleanUp() }()
digestedImage, err := osImageBuilder.CreateAndDigestOsImage()
o.Expect(err).NotTo(o.HaveOccurred(),
"Error creating the new osImage")
logger.Infof("OK\n")
// Create MC to apply the config to worker nodes
exutil.By("Create a MC to deploy the new osImage in 'worker' pool")
wLayeringMcName := "tc-54909-layering-extensions-worker"
wLayeringMC := NewMachineConfig(oc.AsAdmin(), wLayeringMcName, MachineConfigPoolWorker)
wLayeringMC.parameters = []string{"OS_IMAGE=" + digestedImage}
wLayeringMC.skipWaitForMcp = true
defer wLayeringMC.deleteNoWait()
wLayeringMC.create()
// Create MC to apply the config to master nodes
exutil.By("Create a MC to deploy the new osImage in 'master' pool")
mLayeringMcName := "tc-54909-layering-extensions-master"
mLayeringMC := NewMachineConfig(oc.AsAdmin(), mLayeringMcName, MachineConfigPoolMaster)
mLayeringMC.parameters = []string{"OS_IMAGE=" + digestedImage}
mLayeringMC.skipWaitForMcp = true
defer mLayeringMC.deleteNoWait()
mLayeringMC.create()
// Wait for pools
o.Expect(wMcp.WaitForUpdatedStatus()).To(o.Succeed())
logger.Infof("The new osImage was deployed successfully in 'worker' pool\n")
o.Expect(mMcp.WaitForUpdatedStatus()).To(o.Succeed())
logger.Infof("The new osImage was deployed successfully in 'master' pool\n")
// Check rpm-ostree status in worker node
exutil.By("Check that the rpm-ostree status is reporting the right booted image in worker nodes")
wStatus, err := workerNode.GetRpmOstreeStatus(false)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the rpm-ostree status value in worker node %s", workerNode.GetName())
logger.Infof("Current rpm-ostree status in worker node:\n%s\n", wStatus)
wDeployment, err := workerNode.GetBootedOsTreeDeployment(true)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the rpm-ostree status value in worker node %s", workerNode.GetName())
wContainerRef, jerr := JSON(wDeployment).GetSafe("container-image-reference")
o.Expect(jerr).NotTo(o.HaveOccurred(),
"We cant get 'container-image-reference' from the deployment status in worker node. Wrong rpm-ostree status!")
o.Expect(wContainerRef.ToString()).To(o.Equal("ostree-unverified-registry:"+digestedImage),
"container reference in the worker node's status is not the exepeced one")
logger.Infof("OK!\n")
// Check rpm-ostree status in master node
exutil.By("Check that the rpm-ostree status is reporting the right booted image in master nodes")
mStatus, err := masterNode.GetRpmOstreeStatus(false)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the rpm-ostree status value in master node %s", masterNode.GetName())
logger.Infof("Current rpm-ostree status in master node:\n%s\n", mStatus)
mDeployment, err := masterNode.GetBootedOsTreeDeployment(true)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the rpm-ostree status value in master node %s", masterNode.GetName())
mContainerRef, jerr := JSON(mDeployment).GetSafe("container-image-reference")
o.Expect(jerr).NotTo(o.HaveOccurred(),
"We cant get 'container-image-reference' from the deployment status in master node. Wrong rpm-ostree status!")
o.Expect(mContainerRef.ToString()).To(o.Equal("ostree-unverified-registry:"+digestedImage),
"container reference in the master node's status is not the exepeced one")
logger.Infof("OK!\n")
// Check rpm is installed in worker node
exutil.By(fmt.Sprintf("Check that the %s rpm is installed in worker node", rpmName))
o.Expect(workerNode.RpmIsInstalled(rpmName)).
To(o.BeTrue(),
"Error. %s rpm is not installed after changing the osImage in worker node %s.", rpmName, workerNode.GetName())
logger.Infof("OK\n")
// Check rpm is installed in master node
exutil.By(fmt.Sprintf("Check that the %s rpm is installed in master node", rpmName))
o.Expect(masterNode.RpmIsInstalled(rpmName)).
To(o.BeTrue(),
"Error. %s rpm is not installed after changing the osImage in master node %s.", rpmName, workerNode.GetName())
logger.Infof("OK\n")
// Create MC to apply usbguard extension to worker nodes
exutil.By("Create a MC to deploy the usbgard extension in 'worker' pool")
wUsbguardMcName := "tc-54909-extension-usbguard-worker"
wUsbguardMC := NewMachineConfig(oc.AsAdmin(), wUsbguardMcName, MachineConfigPoolWorker).SetMCOTemplate("change-worker-extension-usbguard.yaml")
wUsbguardMC.skipWaitForMcp = true
defer wUsbguardMC.deleteNoWait()
wUsbguardMC.create()
// Create MC to apply usbguard extension to master nodes
exutil.By("Create a MC to deploy the usbguard extension in 'master' pool")
mUsbguardMcName := "tc-54909-extension-usbguard-master"
mUsbguardMC := NewMachineConfig(oc.AsAdmin(), mUsbguardMcName, MachineConfigPoolMaster).SetMCOTemplate("change-worker-extension-usbguard.yaml")
mUsbguardMC.skipWaitForMcp = true
defer mUsbguardMC.deleteNoWait()
mUsbguardMC.create()
// Wait for pools
o.Expect(wMcp.WaitForUpdatedStatus()).To(o.Succeed())
logger.Infof("The new config was applied successfully in 'worker' pool\n")
o.Expect(mMcp.WaitForUpdatedStatus()).To(o.Succeed())
logger.Infof("The new config was applied successfully in 'master' pool\n")
// Check that rpms are installed in worker node after the extension
exutil.By("Check that both rpms are installed in worker node after the extension")
o.Expect(workerNode.RpmIsInstalled(rpmName)).
To(o.BeTrue(),
"Error. %s rpm is not installed after changing the osImage in worker node %s.", rpmName, workerNode.GetName())
o.Expect(workerNode.RpmIsInstalled(extensionRpmName)).
To(o.BeTrue(),
"Error. %s rpm is not installed after changing the osImage in worker node %s.", extensionRpmName, workerNode.GetName())
logger.Infof("OK\n")
// Check that rpms are installed in master node after the extension
exutil.By("Check that both rpms are installed in master node after the extension")
o.Expect(masterNode.RpmIsInstalled(rpmName)).
To(o.BeTrue(),
"Error. %s rpm is not installed after changing the osImage in master node %s.", rpmName, masterNode.GetName())
o.Expect(masterNode.RpmIsInstalled(extensionRpmName)).
To(o.BeTrue(),
"Error. %s rpm is not installed after changing the osImage in master node %s.", extensionRpmName, masterNode.GetName())
logger.Infof("OK\n")
// Check rpm-ostree status in worker node after extension
exutil.By("Check that the rpm-ostree status is reporting the right booted image in worker nodes after the extension is installed")
wStatus, err = workerNode.GetRpmOstreeStatus(false)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the rpm-ostree status value in master node %s after the extension is installed", workerNode.GetName())
logger.Infof("Current rpm-ostree status in worker node after extension:\n%s\n", wStatus)
o.Expect(wStatus).To(o.MatchRegexp("(?s)LayeredPackages:.*usbguard"),
"Status in worker node %s is not reporting the Layered %s package", workerNode.GetName(), extensionRpmName)
wDeployment, err = workerNode.GetBootedOsTreeDeployment(true)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the rpm-ostree status value in worker node %s after the extension is installed", workerNode.GetName())
wContainerRef, jerr = JSON(wDeployment).GetSafe("container-image-reference")
o.Expect(jerr).NotTo(o.HaveOccurred(),
"We cant get 'container-image-reference' from the deployment status in worker node after the extension is installed. Wrong rpm-ostree status!")
o.Expect(wContainerRef.ToString()).To(o.Equal("ostree-unverified-registry:"+digestedImage),
"container reference in the worker node's status is not the exepeced one after the extension is installed")
logger.Infof("OK!\n")
// Check rpm-ostree status in master node after the extension
exutil.By("Check that the rpm-ostree status is reporting the right booted image in master nodes after the extension is installed")
mStatus, err = masterNode.GetRpmOstreeStatus(false)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the rpm-ostree status value in master node %s after the extension is installed", masterNode.GetName())
logger.Infof("Current rpm-ostree status in master node:\n%s\n", mStatus)
o.Expect(mStatus).To(o.MatchRegexp("(?s)LayeredPackages:.*usbguard"),
"Status in master node %s is not reporting the Layered %s package", workerNode.GetName(), extensionRpmName)
mDeployment, err = masterNode.GetBootedOsTreeDeployment(true)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the rpm-ostree status value in master node %s the extension is installed", masterNode.GetName())
mContainerRef, jerr = JSON(mDeployment).GetSafe("container-image-reference")
o.Expect(jerr).NotTo(o.HaveOccurred(),
"We cant get 'container-image-reference' from the deployment status in master node after the extension is installed. Wrong rpm-ostree status!")
o.Expect(mContainerRef.ToString()).To(o.Equal("ostree-unverified-registry:"+digestedImage),
"container reference in the master node's status is not the exepeced one after the extension is installed")
logger.Infof("OK!\n")
exutil.By("Remove custom layering MCs")
wLayeringMC.deleteNoWait()
mLayeringMC.deleteNoWait()
logger.Infof("OK!\n")
// Wait for pools
o.Expect(wMcp.WaitForUpdatedStatus()).To(o.Succeed())
logger.Infof("The new config was applied successfully in 'worker' pool\n")
o.Expect(mMcp.WaitForUpdatedStatus()).To(o.Succeed())
logger.Infof("The new config was applied successfully in 'master' pool\n")
// Check that extension rpm is installed in the worker node, but custom layering rpm is not
exutil.By("Check that extension rpm is installed in worker node but custom layering rpm is not")
o.Expect(workerNode.RpmIsInstalled(rpmName)).
To(o.BeFalse(),
"Error. %s rpm is installed in worker node %s but it should not be installed.", rpmName, workerNode.GetName())
o.Expect(workerNode.RpmIsInstalled(extensionRpmName)).
To(o.BeTrue(),
"Error. %s rpm is not installed after changing the osImage in worker node %s.\n%s", extensionRpmName, workerNode.GetName())
logger.Infof("OK\n")
// Check that extension rpm is installed in the master node, but custom layering rpm is not
exutil.By("Check that both rpms are installed in master node")
o.Expect(masterNode.RpmIsInstalled(rpmName)).
To(o.BeFalse(),
"Error. %s rpm is installed in master node %s but it should not be installed.", rpmName, masterNode.GetName())
o.Expect(masterNode.RpmIsInstalled(extensionRpmName)).
To(o.BeTrue(),
"Error. %s rpm is not installed after changing the osImage in master node %s.", extensionRpmName, masterNode.GetName())
logger.Infof("OK\n")
// Check rpm-ostree status in worker node after deleting custom osImage
exutil.By("Check that the rpm-ostree status is reporting the right booted image in worker nodes after deleting custom osImage")
wStatus, err = workerNode.GetRpmOstreeStatus(false)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the rpm-ostree status value in worker node %s after deleting custom osImage", workerNode.GetName())
logger.Infof("Current rpm-ostree status in worker node after deleting custom osImage:\n%s\n", wStatus)
o.Expect(wStatus).To(o.MatchRegexp("(?s)LayeredPackages:.*usbguard"),
"Status in worker node %s is not reporting the Layered %s package after deleting custom osImage", workerNode.GetName(), extensionRpmName)
o.Expect(wStatus).NotTo(o.ContainSubstring(digestedImage),
"Status in worker node %s is reporting the custom osImage, but it shouldn't because custom osImage was deleted", workerNode.GetName(), extensionRpmName)
logger.Infof("OK!\n")
// Check rpm-ostree status in master node after deleting custom osImage
exutil.By("Check that the rpm-ostree status is reporting the right booted image in master nodes after deleting custom osImage")
mStatus, err = masterNode.GetRpmOstreeStatus(false)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the rpm-ostree status value in master node %s after deleting custom osIMage", masterNode.GetName())
logger.Infof("Current rpm-ostree status in master node:\n%s\n", mStatus)
o.Expect(mStatus).To(o.MatchRegexp("(?s)LayeredPackages:.*usbguard"),
"Status in master node %s is not reporting the Layered %s package after deleting custom osImage", workerNode.GetName(), extensionRpmName)
o.Expect(mStatus).NotTo(o.ContainSubstring(digestedImage),
"Status in master node %s is reporting the custom osImage, but it shouldn't because custom osImage was deleted", workerNode.GetName(), extensionRpmName)
logger.Infof("OK!\n")
})
g.It("Author:sregidor-ConnectedOnly-Longduration-NonPreRelease-High-54915-[P1] Configure kerneltype while using a custom osImage [Disruptive]", func() {
// Due to https://issues.redhat.com/browse/OCPBUGS-31255 in this test case pools will be degraded intermittently. They will be degraded and automatically fixed in a few minutes/seconds
// Because of that we need to use WaitForUpdatedStatus instead of waitForComplete, since WaitForUpdatedStatus will not fail if a pool is degraded for just a few minutes but the configuration is applied properly
architecture.SkipArchitectures(oc, architecture.MULTI, architecture.S390X, architecture.PPC64LE, architecture.ARM64)
skipTestIfSupportedPlatformNotMatched(oc, AWSPlatform, GCPPlatform)
var (
rpmName = "zsh"
dockerFileCommands = fmt.Sprintf(`
RUN printf '[baseos]\nname=CentOS-$releasever - Base\nbaseurl=http://mirror.stream.centos.org/$releasever-stream/BaseOS/$basearch/os/\ngpgcheck=0\nenabled=1\nproxy='$HTTPS_PROXY'\n\n[appstream]\nname=CentOS-$releasever - AppStream\nbaseurl=http://mirror.stream.centos.org/$releasever-stream/AppStream/$basearch/os/\ngpgcheck=0\nenabled=1\nproxy='$HTTPS_PROXY'\n\n' > /etc/yum.repos.d/centos.repo && \
rpm-ostree install %s && \
rpm-ostree cleanup -m && \
ostree container commit
`, rpmName)
rtMcTemplate = "set-realtime-kernel.yaml"
workerNode = NewNodeList(oc).GetAllCoreOsWokerNodesOrFail()[0]
masterNode = NewNodeList(oc).GetAllMasterNodesOrFail()[0]
wMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
mMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)
)
mMcp.SetWaitingTimeForKernelChange()
wMcp.SetWaitingTimeForKernelChange()
defer mMcp.WaitForUpdatedStatus()
defer wMcp.WaitForUpdatedStatus()
// Create a MC to use realtime kernel in the worker pool
exutil.By("Create machine config to enable RT kernel in worker pool")
wRtMcName := "50-realtime-kernel-worker"
wRtMc := NewMachineConfig(oc.AsAdmin(), wRtMcName, MachineConfigPoolWorker).SetMCOTemplate(rtMcTemplate)
wRtMc.skipWaitForMcp = true
defer wRtMc.deleteNoWait()
// TODO: When we extract the "mcp.waitForComplete" from the "create" method, we need to take into account that if
// we are configuring a rt-kernel we need to wait longer.
wRtMc.create()
logger.Infof("OK!\n")
// Create a MC to use realtime kernel in the master pool
exutil.By("Create machine config to enable RT kernel in master pool")
mRtMcName := "50-realtime-kernel-master"
mRtMc := NewMachineConfig(oc.AsAdmin(), mRtMcName, MachineConfigPoolMaster).SetMCOTemplate(rtMcTemplate)
mRtMc.skipWaitForMcp = true
defer mRtMc.deleteNoWait()
mRtMc.create()
logger.Infof("OK!\n")
// Wait for the pools to be updated
exutil.By("Wait for pools to be updated after applying the new realtime kernel")
o.Expect(wMcp.WaitForUpdatedStatus()).To(o.Succeed())
o.Expect(mMcp.WaitForUpdatedStatus()).To(o.Succeed())
logger.Infof("OK!\n")
// Check that realtime kernel is active in worker nodes
exutil.By("Check realtime kernel in worker nodes")
o.Expect(workerNode.IsRealTimeKernel()).Should(o.BeTrue(),
"Kernel is not realtime kernel in worker node %s", workerNode.GetName())
logger.Infof("OK!\n")
// Check that realtime kernel is active in master nodes
exutil.By("Check realtime kernel in master nodes")
o.Expect(masterNode.IsRealTimeKernel()).Should(o.BeTrue(),
"Kernel is not realtime kernel in master node %s", masterNode.GetName())
logger.Infof("OK!\n")
// Build the new osImage
exutil.By("Build a custom osImage")
osImageBuilder := OsImageBuilderInNode{node: workerNode, dockerFileCommands: dockerFileCommands}
digestedImage, err := osImageBuilder.CreateAndDigestOsImage()
o.Expect(err).NotTo(o.HaveOccurred(),
"Error creating the new osImage")
logger.Infof("OK\n")
// Create MC to apply the config to worker nodes
exutil.By("Create a MC to deploy the new osImage in 'worker' pool")
wLayeringMcName := "tc-54915-layering-kerneltype-worker"
wLayeringMC := NewMachineConfig(oc.AsAdmin(), wLayeringMcName, MachineConfigPoolWorker)
wLayeringMC.parameters = []string{"OS_IMAGE=" + digestedImage}
wLayeringMC.skipWaitForMcp = true
defer wLayeringMC.deleteNoWait()
wLayeringMC.create()
logger.Infof("OK!\n")
// Create MC to apply the config to master nodes
exutil.By("Create a MC to deploy the new osImage in 'master' pool")
mLayeringMcName := "tc-54915-layering-kerneltype-master"
mLayeringMC := NewMachineConfig(oc.AsAdmin(), mLayeringMcName, MachineConfigPoolMaster)
mLayeringMC.parameters = []string{"OS_IMAGE=" + digestedImage}
mLayeringMC.skipWaitForMcp = true
defer mLayeringMC.deleteNoWait()
mLayeringMC.create()
logger.Infof("OK!\n")
// Wait for the pools to be updated
exutil.By("Wait for pools to be updated after applying the new osImage")
o.Expect(wMcp.WaitForUpdatedStatus()).To(o.Succeed())
o.Expect(mMcp.WaitForUpdatedStatus()).To(o.Succeed())
logger.Infof("OK!\n")
// Check rpm is installed in worker node
exutil.By("Check that the rpm is installed in worker node")
o.Expect(workerNode.RpmIsInstalled(rpmName)).
To(o.BeTrue(),
"Error. %s rpm is not installed after changing the osImage in worker node %s.", rpmName, workerNode.GetName())
wStatus, err := workerNode.GetRpmOstreeStatus(false)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the rpm-ostree status value in worker node %s", masterNode.GetName())
o.Expect(wStatus).Should(o.And(
o.MatchRegexp("(?s)LayeredPackages:.*kernel-rt-core"),
o.MatchRegexp("(?s)LayeredPackages:.*kernel-rt-kvm"),
o.MatchRegexp("(?s)LayeredPackages:.*kernel-rt-modules"),
o.MatchRegexp("(?s)LayeredPackages:.*kernel-rt-modules-extra"),
o.MatchRegexp("(?s)RemovedBasePackages:.*kernel-core"),
o.MatchRegexp("(?s)RemovedBasePackages:.*kernel-modules"),
o.MatchRegexp("(?s)RemovedBasePackages:.*kernel"),
o.MatchRegexp("(?s)RemovedBasePackages:.*kernel-modules-extra")),
"rpm-ostree status is not reporting the kernel layered packages properly")
logger.Infof("OK\n")
// Check rpm is installed in master node
exutil.By("Check that the rpm is installed in master node")
o.Expect(masterNode.RpmIsInstalled(rpmName)).
To(o.BeTrue(),
"Error. %s rpm is not installed after changing the osImage in master node %s.", rpmName, masterNode.GetName())
mStatus, err := masterNode.GetRpmOstreeStatus(false)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the rpm-ostree status value in master node %s", masterNode.GetName())
o.Expect(mStatus).Should(o.And(
o.MatchRegexp("(?s)LayeredPackages:.*kernel-rt-core"),
o.MatchRegexp("(?s)LayeredPackages:.*kernel-rt-kvm"),
o.MatchRegexp("(?s)LayeredPackages:.*kernel-rt-modules"),
o.MatchRegexp("(?s)LayeredPackages:.*kernel-rt-modules-extra"),
o.MatchRegexp("(?s)RemovedBasePackages: .*kernel-core"),
o.MatchRegexp("(?s)RemovedBasePackages: .*kernel-modules"),
o.MatchRegexp("(?s)RemovedBasePackages: .*kernel"),
o.MatchRegexp("(?s)RemovedBasePackages: .*kernel-modules-extra")),
"rpm-ostree status is not reporting the kernel layered packages properly")
logger.Infof("OK\n")
// Check that realtime kernel is active in worker nodes
exutil.By("Check realtime kernel in worker nodes")
o.Expect(workerNode.IsRealTimeKernel()).Should(o.BeTrue(),
"Kernel is not realtime kernel in worker node %s", workerNode.GetName())
logger.Infof("OK!\n")
// Check that realtime kernel is active in master nodes
exutil.By("Check realtime kernel in master nodes")
o.Expect(masterNode.IsRealTimeKernel()).Should(o.BeTrue(),
"Kernel is not realtime kernel in master node %s", masterNode.GetName())
logger.Infof("OK!\n")
// Delete realtime configs
exutil.By("Delete the realtime kernel MCs")
wRtMc.deleteNoWait()
mRtMc.deleteNoWait()
logger.Infof("OK!\n")
// Wait for the pools to be updated
exutil.By("Wait for pools to be updated after deleting the realtime kernel configs")
o.Expect(wMcp.WaitForUpdatedStatus()).To(o.Succeed())
o.Expect(mMcp.WaitForUpdatedStatus()).To(o.Succeed())
logger.Infof("OK!\n")
// Check that realtime kernel is not active in worker nodes anymore
exutil.By("Check realtime kernel in worker nodes")
o.Expect(workerNode.IsRealTimeKernel()).Should(o.BeFalse(),
"Realtime kernel should not be active anymore in worker node %s", workerNode.GetName())
logger.Infof("OK!\n")
// Check that realtime kernel is not active in master nodes anymore
exutil.By("Check realtime kernel in master nodes")
o.Expect(masterNode.IsRealTimeKernel()).Should(o.BeFalse(),
"Realtime kernel should not be active anymore in master node %s", masterNode.GetName())
logger.Infof("OK!\n")
// Check rpm is installed in worker node
exutil.By("Check that the rpm is installed in worker node")
o.Expect(workerNode.RpmIsInstalled(rpmName)).
To(o.BeTrue(),
"Error. %s rpm is not installed after changing the osImage in worker node %s.", rpmName, workerNode.GetName())
wStatus, err = workerNode.GetRpmOstreeStatus(false)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the rpm-ostree status value in worker node %s", workerNode.GetName())
o.Expect(wStatus).ShouldNot(o.And(
o.ContainSubstring("LayeredPackages"),
o.ContainSubstring("RemovedBasePackages")),
"rpm-ostree status is not reporting the kernel layered packages properly in worker node %s", workerNode.GetName())
logger.Infof("OK\n")
// Check rpm is installed in master node
exutil.By("Check that the rpm is installed in master node")
o.Expect(masterNode.RpmIsInstalled(rpmName)).
To(o.BeTrue(),
"Error. %s rpm is not installed after changing the osImage in master node %s.", rpmName, masterNode.GetName())
mStatus, err = masterNode.GetRpmOstreeStatus(false)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the rpm-ostree status value in master node %s", masterNode.GetName())
o.Expect(mStatus).ShouldNot(o.And(
o.ContainSubstring("LayeredPackages"),
o.ContainSubstring("RemovedBasePackages")),
"rpm-ostree status is not reporting the kernel layered packages properly in master node %s", masterNode.GetName())
logger.Infof("OK\n")
})
g.It("Author:sregidor-ConnectedOnly-Longduration-NonPreRelease-Medium-55002-[P2] Get OSImageURL override related metric data available in telemetry [Disruptive]", func() {
// Due to https://issues.redhat.com/browse/OCPBUGS-31255 in this test case pools will be degraded intermittently. They will be degraded and automatically fixed in a few minutes/seconds
// Because of that we need to use WaitForUpdatedStatus instead of waitForComplete, since WaitForUpdatedStatus will not fail if a pool is degraded for just a few minutes but the configuration is applied properly
var (
osImageURLOverrideQuery = `os_image_url_override`
dockerFileCommands = "RUN touch /etc/hello-world-file"
workerNode = NewNodeList(oc).GetAllCoreOsWokerNodesOrFail()[0]
wMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
mMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)
)
exutil.By("Check that the metric is exposed to telemetry")
expectedExposedMetric := fmt.Sprintf(`{__name__=\"%s:sum\"}`, osImageURLOverrideQuery)
telemetryConfig := NewNamespacedResource(oc.AsAdmin(), "Configmap", "openshift-monitoring", "telemetry-config")
o.Expect(telemetryConfig.Get(`{.data}`)).To(o.ContainSubstring(expectedExposedMetric),
"Metric %s, is not exposed to telemetry", osImageURLOverrideQuery)
exutil.By("Validating initial os_image_url_override values")
mon, err := exutil.NewPrometheusMonitor(oc.AsAdmin())
o.Expect(err).NotTo(o.HaveOccurred(),
"Error creating new thanos monitor")
osImageOverride, err := mon.SimpleQuery(osImageURLOverrideQuery)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error querying metric: %s", osImageURLOverrideQuery)
// Here we are logging both master and worker pools
logger.Infof("Initial %s query: %s", osImageURLOverrideQuery, osImageOverride)
logger.Infof("Validate worker pool's %s value", osImageURLOverrideQuery)
o.Expect(wMcp.GetReportedOsImageOverrideValue()).To(o.Equal("0"),
"Worker pool's %s initial value should be 0. Instead reported metric is: %s",
osImageURLOverrideQuery, osImageOverride)
logger.Infof("Validate master pool's %s value", osImageURLOverrideQuery)
o.Expect(mMcp.GetReportedOsImageOverrideValue()).To(o.Equal("0"),
"Master pool's %s initial value should be 0. Instead reported metric is: %s",
osImageURLOverrideQuery, osImageOverride)
logger.Infof("OK!\n")
// Build the new osImage
exutil.By("Build a custom osImage")
osImageBuilder := OsImageBuilderInNode{node: workerNode, dockerFileCommands: dockerFileCommands}
digestedImage, err := osImageBuilder.CreateAndDigestOsImage()
o.Expect(err).NotTo(o.HaveOccurred(),
"Error creating the new osImage")
logger.Infof("OK\n")
// Create MC to apply the config to worker nodes
exutil.By("Create a MC to deploy the new osImage in 'worker' pool")
wLayeringMcName := "tc-55002-layering-telemetry-worker"
wLayeringMC := NewMachineConfig(oc.AsAdmin(), wLayeringMcName, MachineConfigPoolWorker)
wLayeringMC.parameters = []string{"OS_IMAGE=" + digestedImage}
wLayeringMC.skipWaitForMcp = true
defer mMcp.WaitForUpdatedStatus()
defer wMcp.WaitForUpdatedStatus()
defer wLayeringMC.deleteNoWait()
wLayeringMC.create()
logger.Infof("OK!\n")
// Create MC to apply the config to master nodes
exutil.By("Create a MC to deploy the new osImage in 'master' pool")
mLayeringMcName := "tc-55002-layering-telemetry-master"
mLayeringMC := NewMachineConfig(oc.AsAdmin(), mLayeringMcName, MachineConfigPoolMaster)
mLayeringMC.parameters = []string{"OS_IMAGE=" + digestedImage}
mLayeringMC.skipWaitForMcp = true
defer mLayeringMC.deleteNoWait()
mLayeringMC.create()
logger.Infof("OK!\n")
// Wait for the pools to be updated
exutil.By("Wait for pools to be updated after applying the new osImage")
o.Expect(wMcp.WaitForUpdatedStatus()).To(o.Succeed())
o.Expect(mMcp.WaitForUpdatedStatus()).To(o.Succeed())
logger.Infof("OK!\n")
exutil.By("Validating os_image_url_override values with overridden master and worker pools")
osImageOverride, err = mon.SimpleQuery(osImageURLOverrideQuery)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error querying metric: %s", osImageURLOverrideQuery)
// Here we are logging both master and worker pools
logger.Infof("Executed %s query: %s", osImageURLOverrideQuery, osImageOverride)
logger.Infof("Validate worker pool's %s value", osImageURLOverrideQuery)
o.Expect(wMcp.GetReportedOsImageOverrideValue()).To(o.Equal("1"),
"Worker pool's %s value with overridden master and worker pools should be 1. Instead reported metric is: %s",
osImageURLOverrideQuery, osImageOverride)
logger.Infof("Validate master pool's %s value", osImageURLOverrideQuery)
o.Expect(mMcp.GetReportedOsImageOverrideValue()).To(o.Equal("1"),
"Master pool's %s value with overridden master and worker pools should be 1. Instead reported metric is: %s",
osImageURLOverrideQuery, osImageOverride)
logger.Infof("OK!\n")
exutil.By("Delete the MC that overrides worker pool's osImage and wait for the pool to be updated")
wLayeringMC.deleteNoWait()
o.Expect(wMcp.WaitForUpdatedStatus()).To(o.Succeed())
logger.Infof("OK!\n")
exutil.By("Validating os_image_url_override values with overridden master pool only")
osImageOverride, err = mon.SimpleQuery(osImageURLOverrideQuery)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error querying metric: %s", osImageURLOverrideQuery)
// Here we are logging both master and worker pools
logger.Infof("Executed %s query: %s", osImageURLOverrideQuery, osImageOverride)
logger.Infof("Validate worker pool's %s value", osImageURLOverrideQuery)
o.Expect(wMcp.GetReportedOsImageOverrideValue()).To(o.Equal("0"),
"Worker pool's %s value should be 0 when only the master pool is overridden. Instead reported metric is: %s",
osImageURLOverrideQuery, osImageOverride)
logger.Infof("Validate master pool's %s value", osImageURLOverrideQuery)
o.Expect(mMcp.GetReportedOsImageOverrideValue()).To(o.Equal("1"),
"Master pool's %s value should be 1 when only the master pool is overridden. Instead reported metric is: %s",
osImageURLOverrideQuery, osImageOverride)
logger.Infof("OK!\n")
exutil.By("Delete the MC that overrides master pool's osImage and wait for the pool to be updated")
mLayeringMC.deleteNoWait()
o.Expect(mMcp.WaitForUpdatedStatus()).To(o.Succeed())
logger.Infof("OK!\n")
exutil.By("Validating os_image_url_override when no pool is overridden")
osImageOverride, err = mon.SimpleQuery(osImageURLOverrideQuery)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error querying metric: %s", osImageURLOverrideQuery)
// Here we are logging both master and worker pools
logger.Infof("Executed %s query: %s", osImageURLOverrideQuery, osImageOverride)
logger.Infof("Validate worker pool's %s value", osImageURLOverrideQuery)
o.Expect(wMcp.GetReportedOsImageOverrideValue()).To(o.Equal("0"),
"Worker pool's %s value should be 0 when no pool is overridden. Instead reported metric is: %s",
osImageURLOverrideQuery, osImageOverride)
logger.Infof("Validate master pool's %s value", osImageURLOverrideQuery)
o.Expect(mMcp.GetReportedOsImageOverrideValue()).To(o.Equal("0"),
"Master pool's %s value should be 0 when no pool is overridden. Instead reported metric is: %s",
osImageURLOverrideQuery, osImageOverride)
logger.Infof("OK!\n")
})
g.It("Author:sregidor-ConnectedOnly-Longduration-NonPreRelease-Medium-54056-Update osImage using the internal registry to store the image [Disruptive]", func() {
var (
osImageNewFilePath = "/etc/hello-tc-54056"
dockerFileCommands = fmt.Sprintf(`
RUN touch %s
`, osImageNewFilePath)
mMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)
)
SkipTestIfCannotUseInternalRegistry(oc.AsAdmin())
architecture.SkipArchitectures(oc, architecture.MULTI, architecture.S390X, architecture.PPC64LE)
// Select the nodes
builderNode := mMcp.GetNodesOrFail()[0] // We always build the image in a master node to make sure it is CoreOs
// We test the image in a compact/sno compatible pool
mcp := GetCompactCompatiblePool(oc.AsAdmin())
if len(mcp.GetCoreOsNodesOrFail()) == 0 {
logger.Infof("The worker pool has no CoreOS nodes; we will use the master pool for testing the osImage")
mcp = mMcp
}
node := mcp.GetCoreOsNodesOrFail()[0]
logger.Infof("Using pool %s and node %s for testing", mcp.GetName(), node.GetName())
// Build the new osImage
osImageBuilder := OsImageBuilderInNode{node: builderNode, dockerFileCommands: dockerFileCommands}
osImageBuilder.UseInternalRegistry = true
defer func() { _ = osImageBuilder.CleanUp() }()
digestedImage, err := osImageBuilder.CreateAndDigestOsImage()
o.Expect(err).NotTo(o.HaveOccurred(),
"Error creating the new osImage")
logger.Infof("Digested Image: %s", digestedImage)
logger.Infof("OK\n")
// Create MC and wait for MCP
exutil.By("Create a MC to deploy the new osImage")
layeringMcName := "layering-mc"
layeringMC := NewMachineConfig(oc.AsAdmin(), layeringMcName, mcp.GetName())
layeringMC.parameters = []string{"OS_IMAGE=" + digestedImage}
defer layeringMC.delete()
layeringMC.create()
mcp.waitForComplete()
logger.Infof("The new osImage was deployed successfully\n")
logger.Infof("OK\n")
// Check image content
exutil.By("Load remote resources to verify that the osImage content has been deployed properly")
tc54056File := NewRemoteFile(node, osImageNewFilePath)
o.Expect(tc54056File.Exists()).To(o.BeTrue(),
"The file %s included in the osImage should exist in the node %s, but it does not", osImageNewFilePath, node.GetName())
o.Expect(tc54056File.Fetch()).To(o.Succeed(),
"The content of file %s could not be retrieved from node %s", osImageNewFilePath, node.GetName())
o.Expect(tc54056File.GetTextContent()).To(o.BeEmpty(),
"The file %s should be empty, but it is not. Current content: %s", osImageNewFilePath, tc54056File.GetTextContent())
logger.Infof("OK\n")
// Delete the MC and wait for MCP
exutil.By("Delete the MC so that the original osImage is restored")
layeringMC.delete()
mcp.waitForComplete()
logger.Infof("MC was successfully deleted\n")
logger.Infof("OK\n")
exutil.By("Check that the included new content is not present anymore")
o.Expect(tc54056File.Exists()).To(o.BeFalse(),
"The file %s included in the osImage should no longer exist in node %s after the MC is deleted, but it still exists", osImageNewFilePath, node.GetName())
logger.Infof("OK\n")
})
g.It("Author:sregidor-ConnectedOnly-Longduration-NonPreRelease-High-67789-[P1] Configure 64k-pages kerneltype while using a custom osImage [Disruptive]", func() {
var (
mcTemplate64k = "set-64k-pages-kernel.yaml"
rpmName = "zsh"
dockerFileCommands = fmt.Sprintf(`
RUN printf '[baseos]\nname=CentOS-$releasever - Base\nbaseurl=http://mirror.stream.centos.org/$releasever-stream/BaseOS/$basearch/os/\ngpgcheck=0\nenabled=1\nproxy='$HTTPS_PROXY'\n\n[appstream]\nname=CentOS-$releasever - AppStream\nbaseurl=http://mirror.stream.centos.org/$releasever-stream/AppStream/$basearch/os/\ngpgcheck=0\nenabled=1\nproxy='$HTTPS_PROXY'\n\n' > /etc/yum.repos.d/centos.repo && \
rpm-ostree install %s && \
rpm-ostree cleanup -m && \
ostree container commit
`, rpmName)
)
architecture.SkipIfNoNodeWithArchitectures(oc.AsAdmin(), architecture.ARM64)
clusterinfra.SkipTestIfNotSupportedPlatform(oc.AsAdmin(), clusterinfra.GCP)
createdCustomPoolName := fmt.Sprintf("mco-test-%s", architecture.ARM64)
defer DeleteCustomMCP(oc.AsAdmin(), createdCustomPoolName)
mcp, nodes := GetPoolAndNodesForArchitectureOrFail(oc.AsAdmin(), createdCustomPoolName, architecture.ARM64, 1)
node := nodes[0]
mcp.SetWaitingTimeForKernelChange() // Increase waiting time
// Create a MC to use 64k-pages kernel
exutil.By("Create machine config to enable 64k-pages kernel")
mcName64k := fmt.Sprintf("tc-67789-64k-pages-kernel-%s", mcp.GetName())
mc64k := NewMachineConfig(oc.AsAdmin(), mcName64k, mcp.GetName()).SetMCOTemplate(mcTemplate64k)
defer mc64k.delete()
mc64k.create()
logger.Infof("OK!\n")
// Check that 64k-pages kernel is active
exutil.By("Check 64k-pages kernel")
o.Expect(node.Is64kPagesKernel()).Should(o.BeTrue(),
"Kernel is not 64k-pages kernel in node %s", node.GetName())
logger.Infof("OK!\n")
// Build the new osImage
exutil.By("Build a custom osImage")
osImageBuilder := OsImageBuilderInNode{node: node, dockerFileCommands: dockerFileCommands}
digestedImage, err := osImageBuilder.CreateAndDigestOsImage()
o.Expect(err).NotTo(o.HaveOccurred(),
"Error creating the new osImage")
logger.Infof("OK\n")
// Create a MC to apply the config
exutil.By("Create a MC to deploy the new osImage")
layeringMcName := fmt.Sprintf("tc-67789-layering-64kpages-%s", mcp.GetName())
layeringMC := NewMachineConfig(oc.AsAdmin(), layeringMcName, mcp.GetName())
layeringMC.parameters = []string{"OS_IMAGE=" + digestedImage}
layeringMC.skipWaitForMcp = true
defer layeringMC.deleteNoWait()
layeringMC.create()
mcp.waitForComplete()
logger.Infof("OK!\n")
// Check that the expected (zsh+64k-pages kernel) rpms are installed
exutil.By("Check that all the expected rpms are installed")
o.Expect(
node.RpmIsInstalled(rpmName),
).To(o.BeTrue(),
"Error. %s rpm is not installed after changing the osImage in node %s.", rpmName, node.GetName())
o.Expect(
node.GetRpmOstreeStatus(false),
).Should(o.And(
o.MatchRegexp("(?s)LayeredPackages:.*kernel-64k-core"),
o.MatchRegexp("(?s)LayeredPackages:.*kernel-64k-modules"),
o.MatchRegexp("(?s)LayeredPackages:.*kernel-64k-modules-extra"),
o.MatchRegexp("(?s)RemovedBasePackages:.*kernel-core"),
o.MatchRegexp("(?s)RemovedBasePackages:.*kernel-modules"),
o.MatchRegexp("(?s)RemovedBasePackages:.*kernel "),
o.MatchRegexp("(?s)RemovedBasePackages:.*kernel-modules-extra")),
"rpm-ostree status is not reporting the kernel layered packages properly")
logger.Infof("OK\n")
// Check that 64k-pages kernel is active
exutil.By("Check 64k-pages kernel")
o.Expect(node.Is64kPagesKernel()).Should(o.BeTrue(),
"Kernel is not 64k-pages kernel in node %s", node.GetName())
logger.Infof("OK!\n")
// Delete 64k-pages config
exutil.By("Delete the 64k-pages kernel MC")
mc64k.delete()
logger.Infof("OK!\n")
// Check that 64k-pages kernel is not installed anymore
exutil.By("Check that 64k-pages kernel is not installed anymore")
o.Expect(node.Is64kPagesKernel()).Should(o.BeFalse(),
"Huge pages kernel should not be installed anymore in node %s", node.GetName())
logger.Infof("OK!\n")
// Check zsh rpm is installed
exutil.By("Check that the zsh rpm is still installed after we removed the 64k-pages kernel")
o.Expect(node.RpmIsInstalled(rpmName)).
To(o.BeTrue(),
"Error. %s rpm is not installed after changing the osImage in node %s.", rpmName, node.GetName())
o.Expect(
node.GetRpmOstreeStatus(false),
).ShouldNot(o.And(
o.ContainSubstring("LayeredPackages"),
o.ContainSubstring("RemovedBasePackages")),
"rpm-ostree status is not reporting the layered packages properly in node %s", node.GetName())
logger.Infof("OK\n")
})
})
// oc: the CLI
// image: the layered image that will be configured in the MC
// layeringMcName: the name of the MC
// expectedNDMessage: expected value for the message in the MCP NodeDegraded condition
// expectedNDReason: expected value for the reason in the MCP NodeDegraded condition
func checkInvalidOsImagesDegradedStatus(oc *exutil.CLI, image, layeringMcName, expectedNDMessage, expectedNDReason string) {
var (
mcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
)
// Create MC and wait for MCP
layeringMC := NewMachineConfig(oc.AsAdmin(), layeringMcName, mcp.GetName())
layeringMC.parameters = []string{"OS_IMAGE=" + image}
layeringMC.skipWaitForMcp = true
validateMcpNodeDegraded(layeringMC, mcp, expectedNDMessage, expectedNDReason, false)
}
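// NOTE: illustrative sketch only, not part of the original test file. Assuming the Ginkgo suite
// provides an `oc` client, it shows how a spec body could drive the helper above. The image
// reference and MachineConfig name below are hypothetical placeholders; the real usages are the
// 54052/54054 test cases further down in this dump.
func exampleInvalidOsImageDegradesWorkerPool(oc *exutil.CLI) {
// Hypothetical image that is pullable but not a bootable ostree container.
badImage := "quay.io/example/not-a-bootable-image:latest"
// Hypothetical MachineConfig name used only for this sketch.
layeringMcName := "example-invalid-osimage-mc"
// NodeDegraded messages are matched as regular expressions, so literal image
// references must be escaped with regexp.QuoteMeta.
expectedNDMessage := ".*failed to update OS to " + regexp.QuoteMeta(badImage) + ".*"
expectedNDReason := "1 nodes are reporting degraded status on sync"
checkInvalidOsImagesDegradedStatus(oc, badImage, layeringMcName, expectedNDMessage, expectedNDReason)
}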
|
package mco
| ||||
function
|
openshift/openshift-tests-private
|
5b86b7ac-73a5-4b56-b07c-e03057bb472c
|
checkInvalidOsImagesDegradedStatus
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_layering.go
|
func checkInvalidOsImagesDegradedStatus(oc *exutil.CLI, image, layeringMcName, expectedNDMessage, expectedNDReason string) {
var (
mcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
)
// Create MC and wait for MCP
layeringMC := NewMachineConfig(oc.AsAdmin(), layeringMcName, mcp.GetName())
layeringMC.parameters = []string{"OS_IMAGE=" + image}
layeringMC.skipWaitForMcp = true
validateMcpNodeDegraded(layeringMC, mcp, expectedNDMessage, expectedNDReason, false)
}
|
mco
| |||||
test case
|
openshift/openshift-tests-private
|
b394447a-a70b-4d72-a543-051878dab5b4
|
Author:sregidor-ConnectedOnly-Longduration-NonPreRelease-Critical-54085-[P1] Update osImage changing /etc /usr and rpm [Disruptive]
|
['"github.com/openshift/openshift-tests-private/test/extended/util/architecture"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_layering.go
|
g.It("Author:sregidor-ConnectedOnly-Longduration-NonPreRelease-Critical-54085-[P1] Update osImage changing /etc /usr and rpm [Disruptive]", func() {
architecture.SkipArchitectures(oc, architecture.MULTI, architecture.S390X, architecture.PPC64LE)
// Because HTTPS proxies use their own user CA certificate, we need to take the openshift-config-user-ca-bundle.crt file into account
coreOSMcp := GetCoreOsCompatiblePool(oc.AsAdmin())
node := coreOSMcp.GetCoreOsNodesOrFail()[0]
dockerFileCommands := `
RUN mkdir /etc/tc_54085 && chmod 3770 /etc/tc_54085 && ostree container commit
RUN echo 'Test case 54085 test file' > /etc/tc54085.txt && chmod 5400 /etc/tc54085.txt && ostree container commit
RUN echo 'echo "Hello world"' > /usr/bin/tc54085_helloworld && chmod 5770 /usr/bin/tc54085_helloworld && ostree container commit
COPY openshift-config-user-ca-bundle.crt /etc/pki/ca-trust/source/anchors/openshift-config-user-ca-bundle.crt
RUN update-ca-trust && \
rm /etc/pki/ca-trust/source/anchors/openshift-config-user-ca-bundle.crt && \
cd /etc/yum.repos.d/ && curl -LO https://pkgs.tailscale.com/stable/fedora/tailscale.repo && \
rpm-ostree install tailscale && rpm-ostree cleanup -m && \
systemctl enable tailscaled && \
ostree container commit
`
// Capture current rpm-ostree status
exutil.By("Capture the current ostree deployment")
initialBootedImage, err := node.GetCurrentBootOSImage()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the initial booted image")
logger.Infof("OK\n")
// Build the new osImage
osImageBuilder := OsImageBuilderInNode{node: node, dockerFileCommands: dockerFileCommands}
digestedImage, err := osImageBuilder.CreateAndDigestOsImage()
o.Expect(err).NotTo(o.HaveOccurred(),
"Error creating the new osImage")
logger.Infof("OK\n")
// Create MC and wait for MCP
exutil.By("Create a MC to deploy the new osImage")
layeringMcName := "layering-mc"
layeringMC := NewMachineConfig(oc.AsAdmin(), layeringMcName, coreOSMcp.GetName())
layeringMC.parameters = []string{"OS_IMAGE=" + digestedImage}
layeringMC.skipWaitForMcp = true
defer layeringMC.delete()
layeringMC.create()
coreOSMcp.waitForComplete()
logger.Infof("The new osImage was deployed successfully\n")
// Check rpm-ostree status
exutil.By("Check that the rpm-ostree status is reporting the right booted image")
o.Expect(node.GetCurrentBootOSImage()).To(o.Equal(digestedImage),
"The booted image reported by rpm-ostree status is not the expected one")
logger.Infof("OK!\n")
// Check image content
exutil.By("Load remote resources to verify that the osImage content has been deployed properly")
tc54085Dir := NewRemoteFile(node, "/etc/tc_54085")
tc54085File := NewRemoteFile(node, "/etc/tc54085.txt")
binHelloWorld := NewRemoteFile(node, "/usr/bin/tc54085_helloworld")
o.Expect(tc54085Dir.Fetch()).ShouldNot(o.HaveOccurred(),
"Error getting information about file %s in node %s", tc54085Dir.GetFullPath(), node.GetName())
o.Expect(tc54085File.Fetch()).ShouldNot(o.HaveOccurred(),
"Error getting information about file %s in node %s", tc54085File.GetFullPath(), node.GetName())
o.Expect(binHelloWorld.Fetch()).ShouldNot(o.HaveOccurred(),
"Error getting information about file %s in node %s", binHelloWorld.GetFullPath(), node.GetName())
logger.Infof("OK!\n")
exutil.By("Check that the directory in /etc exists and has the right permissions")
o.Expect(tc54085Dir.IsDirectory()).To(o.BeTrue(),
"Error, %s in node %s is not a directory", tc54085Dir.GetFullPath(), node.GetName())
o.Expect(tc54085Dir.GetNpermissions()).To(o.Equal("3770"),
"Error, permissions of %s in node %s are not the expected ones", tc54085Dir.GetFullPath(), node.GetName())
logger.Infof("OK!\n")
exutil.By("Check that the file in /etc exists and has the right permissions")
o.Expect(tc54085File.GetNpermissions()).To(o.Equal("5400"),
"Error, permissions of %s in node %s are not the expected ones", tc54085File.GetFullPath(), node.GetName())
o.Expect(tc54085File.GetTextContent()).To(o.Equal("Test case 54085 test file\n"),
"Error, content of %s in node %s is not the expected one", tc54085File.GetFullPath(), node.GetName())
exutil.By("Check that the file in /usr/bin exists, has the right permissions and can be executed")
o.Expect(binHelloWorld.GetNpermissions()).To(o.Equal("5770"),
"Error, permissions of %s in node %s are not the expected ones", binHelloWorld.GetFullPath(), node.GetName())
output, herr := node.DebugNodeWithChroot("/usr/bin/tc54085_helloworld")
o.Expect(herr).NotTo(o.HaveOccurred(),
"Error executing 'hello world' executable file /usr/bin/tc54085_helloworld")
o.Expect(output).To(o.ContainSubstring("Hello world"),
"Error, 'Hello world' executable file's output was not the expected one")
logger.Infof("OK!\n")
exutil.By("Check that the tailscale rpm has been deployed")
tailscaledRpm, rpmErr := node.DebugNodeWithChroot("rpm", "-q", "tailscale")
o.Expect(rpmErr).NotTo(o.HaveOccurred(),
"Error, getting the installed rpms in node %s. 'tailscale' rpm is not installed.", node.GetName())
o.Expect(tailscaledRpm).To(o.ContainSubstring("tailscale-"),
"Error, 'tailscale' rpm is not installed in node %s", node.GetName())
logger.Infof("OK!\n")
exutil.By("Check that the tailscaled.service unit is loaded, active and enabled")
tailscaledStatus, unitErr := node.GetUnitStatus("tailscaled.service")
o.Expect(unitErr).NotTo(o.HaveOccurred(),
"Error getting the status of the 'tailscaled.service' unit in node %s", node.GetName())
o.Expect(tailscaledStatus).Should(
o.And(
o.ContainSubstring("tailscaled.service"),
o.ContainSubstring("Active: active"), // is active
o.ContainSubstring("Loaded: loaded"), // is loaded
o.ContainSubstring("; enabled;")), // is enabled
"tailscaled.service unit should be loaded, active and enabled and it is not")
logger.Infof("OK!\n")
// Delete the MC and wait for MCP
exutil.By("Delete the MC so that the original osImage is restored")
layeringMC.delete()
coreOSMcp.waitForComplete()
logger.Infof("MC was successfully deleted\n")
// Check the rpm-ostree status after the MC deletion
exutil.By("Check that the original ostree deployment was restored")
o.Expect(node.GetCurrentBootOSImage()).To(o.Equal(initialBootedImage),
"Error! the initial osImage was not properly restored after deleting the MachineConfig")
logger.Infof("OK!\n")
// Check the image content after the MC deletion
exutil.By("Check that the directory in /etc does not exist anymore")
o.Expect(tc54085Dir.Fetch()).Should(o.HaveOccurred(),
"Error, file %s should not exist in node %s, but it exists", tc54085Dir.GetFullPath(), node.GetName())
logger.Infof("OK!\n")
exutil.By("Check that the file in /etc does not exist anymore")
o.Expect(tc54085File.Fetch()).Should(o.HaveOccurred(),
"Error, file %s should not exist in node %s, but it exists", tc54085File.GetFullPath(), node.GetName())
logger.Infof("OK!\n")
exutil.By("Check that the file in /usr/bin does not exist anymore")
o.Expect(binHelloWorld.Fetch()).Should(o.HaveOccurred(),
"Error, file %s should not exist in node %s, but it exists", binHelloWorld.GetFullPath(), node.GetName())
logger.Infof("OK!\n")
exutil.By("Check that the tailscale rpm is not installed anymore")
tailscaledRpm, rpmErr = node.DebugNodeWithChroot("rpm", "-q", "tailscale")
o.Expect(rpmErr).To(o.HaveOccurred(),
"Error, 'tailscale' rpm should not be installed in node %s, but it is installed.\n Output %s", node.GetName(), tailscaledRpm)
logger.Infof("OK!\n")
exutil.By("Check that the tailscaled.service is not present anymore")
tailscaledStatus, unitErr = node.GetUnitStatus("tailscaled.service")
o.Expect(unitErr).To(o.HaveOccurred(),
"Error, 'tailscaled.service' unit should not be available in node %s, but it is.\n Output %s", node.GetName(), tailscaledStatus)
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
42767e91-0e87-4043-af50-fe51270d172c
|
Author:sregidor-ConnectedOnly-NonPreRelease-Longduration-Medium-54052-[P2] Not bootable layered osImage provided[Disruptive]
|
['"regexp"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_layering.go
|
g.It("Author:sregidor-ConnectedOnly-NonPreRelease-Longduration-Medium-54052-[P2] Not bootable layered osImage provided[Disruptive]", func() {
var (
nonBootableImage = "quay.io/openshifttest/hello-openshift:1.2.0"
layeringMcName = "not-bootable-image-tc54052"
expectedNDMessage = ".*failed to update OS to " + regexp.QuoteMeta(nonBootableImage) + ".*error running rpm-ostree rebase.*ostree.bootable.*"
expectedNDReason = "1 nodes are reporting degraded status on sync"
)
checkInvalidOsImagesDegradedStatus(oc.AsAdmin(), nonBootableImage, layeringMcName, expectedNDMessage, expectedNDReason)
})
| |||||
test case
|
openshift/openshift-tests-private
|
711ad270-be85-4afd-a978-94230be21b1f
|
Author:sregidor-NonPreRelease-Longduration-Medium-54054-Not pullable layered osImage provided[Disruptive]
|
['"regexp"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_layering.go
|
g.It("Author:sregidor-NonPreRelease-Longduration-Medium-54054-Not pullable layered osImage provided[Disruptive]", func() {
var (
nonPullableImage = "quay.io/openshifttest/tc54054fakeimage:latest"
layeringMcName = "not-pullable-image-tc54054"
expectedNDMessage = ".*" + regexp.QuoteMeta(nonPullableImage) + ".*error"
expectedNDReason = "1 nodes are reporting degraded status on sync"
)
checkInvalidOsImagesDegradedStatus(oc.AsAdmin(), nonPullableImage, layeringMcName, expectedNDMessage, expectedNDReason)
})
| |||||
test case
|
openshift/openshift-tests-private
|
e1fec74f-a3f4-4471-8782-aaaf340eba49
|
Author:sregidor-ConnectedOnly-Longduration-NonPreRelease-Critical-54159-[P1] Apply a new osImage on a cluster with already installed rpms [Disruptive]
|
['"github.com/openshift/openshift-tests-private/test/extended/util/architecture"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_layering.go
|
g.It("Author:sregidor-ConnectedOnly-Longduration-NonPreRelease-Critical-54159-[P1] Apply a new osImage on a cluster with already installed rpms [Disruptive]", func() {
var (
rpmName = "wget"
yumRepoTemplate = generateTemplateAbsolutePath("centos.repo")
yumRepoFile = "/etc/yum.repos.d/tc-54159-centos.repo"
proxy = NewResource(oc.AsAdmin(), "proxy", "cluster")
coreOSMcp = GetCoreOsCompatiblePool(oc.AsAdmin())
node = coreOSMcp.GetCoreOsNodesOrFail()[0]
)
architecture.SkipArchitectures(oc, architecture.MULTI, architecture.S390X, architecture.PPC64LE)
dockerFileCommands := `
RUN echo "echo 'Hello world! '$(whoami)" > /usr/bin/tc_54159_rpm_and_osimage && chmod 1755 /usr/bin/tc_54159_rpm_and_osimage
`
// Build the new osImage
osImageBuilder := OsImageBuilderInNode{node: node, dockerFileCommands: dockerFileCommands}
digestedImage, berr := osImageBuilder.CreateAndDigestOsImage()
o.Expect(berr).NotTo(o.HaveOccurred(),
"Error creating the new osImage")
logger.Infof("OK\n")
// Install rpm in first node
exutil.By("Installing rpm package in first working node")
logger.Infof("Copy yum repo to node")
o.Expect(node.CopyFromLocal(yumRepoTemplate, yumRepoFile)).
NotTo(o.HaveOccurred(),
"Error copying %s to %s in node %s", yumRepoTemplate, yumRepoFile, node.GetName())
// rpm-ostree only uses the proxy from the yum.repos.d configuration, it ignores the env vars.
logger.Infof("Configure proxy in yum")
_, err := node.DebugNodeWithChroot("sed", "-i", "s#proxy=#proxy="+proxy.GetOrFail(`{.status.httpProxy}`)+"#g", yumRepoFile)
o.Expect(err).NotTo(o.HaveOccurred(), "Error configuring the proxy in the centos yum repo config")
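// Illustration (assumed layout, not taken from the original repo template): after the sed above,
// each stanza of the copied tc-54159-centos.repo is expected to end up looking roughly like
//   [baseos]
//   baseurl=http://mirror.stream.centos.org/$releasever-stream/BaseOS/$basearch/os/
//   proxy=http://<cluster-http-proxy>:<port>
// because rpm-ostree reads the proxy from the repo file itself rather than from the env vars.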
defer func() {
logger.Infof("Start defer logic to uninstall the %s rpm", rpmName)
waitErr := node.WaitUntilRpmOsTreeIsIdle()
if waitErr != nil {
node.CancelRpmOsTreeTransactions()
}
node.UninstallRpm(rpmName)
node.DebugNodeWithChroot("rm", yumRepoFile)
node.Reboot()
coreOSMcp.waitForComplete()
// Because of a bug in SNO after a reboot the controller cannot get the lease properly
// We wait until the controller gets the lease. We make sure that the next test will receive a fully clean environment with the controller ready
o.Eventually(NewController(oc.AsAdmin()).HasAcquiredLease, "10m", "20s").Should(o.BeTrue(),
"Controller never acquired lease after the node was rebooted")
// Printing the status, apart from tracing the exact status of rpm-ostree,
// is a way of waiting for the node to be ready after the reboot, so that the next test case
// can be executed without problems, because the status cannot be retrieved until the node is ready.
status, _ := node.GetRpmOstreeStatus(false)
logger.Infof(status)
}()
// We wait, but we don't fail; if it does not become idle we cancel the transaction in the installation command
waitErr := node.WaitUntilRpmOsTreeIsIdle()
if waitErr != nil {
logger.Infof("rpm-ostree state is NOT IDLE. We cancel the current transactions to continue the test!!!")
cOut, err := node.CancelRpmOsTreeTransactions()
o.Expect(err).
NotTo(o.HaveOccurred(),
"Error cancelling transactions in node %s.\n%s", node.GetName(), cOut)
}
instOut, err := node.InstallRpm(rpmName)
logger.Debugf("Install rpm output: %s", instOut)
o.Expect(err).
NotTo(o.HaveOccurred(),
"Error installing '%s' rpm in node %s", rpmName, node.GetName())
o.Expect(node.Reboot()).To(o.Succeed(),
"Error rebooting node %s", node.GetName())
// In SNO clusters when we reboot the only node the connectivity is broken.
// Because the exutils.debugNode function fails the test if any problem happens
// we need to wait until the pool is stable (waitForComplete) before trying to debug the node again, even if we do it inside an Eventually instruction
coreOSMcp.waitForComplete()
logger.Infof("Check that the wget binary is available")
o.Eventually(func() error {
_, err := node.DebugNodeWithChroot("which", "wget")
return err
}, "15m", "20s").Should(o.Succeed(),
"Error. wget binary is not available after installing '%s' rpm in node %s.", rpmName, node.GetName())
logger.Infof("OK\n")
// Capture current rpm-ostree status
exutil.By("Capture the current ostree deployment")
o.Eventually(node.IsRpmOsTreeIdle, "10m", "20s").
Should(o.BeTrue(), "rpm-ostree status didn't become idle after installing wget")
initialDeployment, derr := node.GetBootedOsTreeDeployment(false)
o.Expect(derr).NotTo(o.HaveOccurred(),
"Error getting the rpm-ostree status value in node %s", node.GetName())
logger.Infof("Current status with date:\n %s", initialDeployment)
o.Expect(initialDeployment).
To(o.MatchRegexp("LayeredPackages: .*%s", rpmName),
"rpm-ostree is not reporting the installed '%s' package in the rpm-ostree status command", rpmName)
initialBootedImage, err := node.GetCurrentBootOSImage()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the initial booted image")
logger.Infof("Initial booted osImage: %s", initialBootedImage)
logger.Infof("OK\n")
// Create MC and wait for MCP
exutil.By("Create a MC to deploy the new osImage")
layeringMcName := "layering-mc-54159"
layeringMC := NewMachineConfig(oc.AsAdmin(), layeringMcName, coreOSMcp.GetName())
layeringMC.parameters = []string{"OS_IMAGE=" + digestedImage}
layeringMC.skipWaitForMcp = true
defer layeringMC.delete()
layeringMC.create()
// Because of a bug in SNO after a reboot the controller cannot get the lease properly
// We wait until the controller gets the lease
o.Eventually(NewController(oc.AsAdmin()).HasAcquiredLease, "10m", "20s").Should(o.BeTrue(),
"Controller never acquired lease after the node was rebooted")
coreOSMcp.waitForComplete()
logger.Infof("The new osImage was deployed successfully\n")
// Check rpm-ostree status
exutil.By("Check that the rpm-ostree status is reporting the right booted image and installed rpm")
bootedDeployment, err := node.GetBootedOsTreeDeployment(false)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the rpm-ostree status value in node %s", node.GetName())
logger.Infof("Current rpm-ostree booted status:\n%s\n", bootedDeployment)
o.Expect(bootedDeployment).
To(o.MatchRegexp("LayeredPackages: .*%s", rpmName),
"rpm-ostree is not reporting the installed 'wget' package in the rpm-ostree status command")
o.Expect(node.GetCurrentBootOSImage()).To(o.Equal(digestedImage),
"container reference in the status is not reporting the right booted image")
logger.Infof("OK!\n")
// Check rpm is installed
exutil.By("Check that the rpm is installed even if we use the new osImage")
rpmOut, err := node.DebugNodeWithChroot("rpm", "-q", "wget")
o.Expect(err).
NotTo(o.HaveOccurred(),
"Error. %s rpm is not installed after changing the osImage in node %s.\n%s", rpmName, node.GetName(), rpmOut)
wOut, err := node.DebugNodeWithChroot("which", "wget")
o.Expect(err).
NotTo(o.HaveOccurred(),
"Error. wget binary is not available after installing '%s' rpm in node %s.\n%s", rpmName, node.GetName(), wOut)
logger.Infof("OK\n")
// Check osImage content
exutil.By("Check that the new osImage content was deployed properly")
rf := NewRemoteFile(node, "/usr/bin/tc_54159_rpm_and_osimage")
o.Expect(rf.Fetch()).
ShouldNot(o.HaveOccurred(),
"Error getting information about file %s in node %s", rf.GetFullPath(), node.GetName())
o.Expect(rf.GetNpermissions()).To(o.Equal("1755"),
"Error, permissions of %s in node %s are not the expected ones", rf.GetFullPath(), node.GetName())
o.Expect(rf.GetTextContent()).To(o.ContainSubstring("Hello world"),
"Error, content of %s in node %s is not the expected one", rf.GetFullPath(), node.GetName())
logger.Infof("OK\n")
// Delete the MC and wait for MCP
exutil.By("Delete the MC so that original osImage is restored")
layeringMC.delete()
logger.Infof("MC was successfully deleted\n")
// Check the rpm-ostree status after the MC deletion
exutil.By("Check that the original ostree deployment was restored")
logger.Infof("Waiting for rpm-ostree status to be idle")
o.Eventually(node.IsRpmOsTreeIdle, "10m", "20s").
Should(o.BeTrue(), "rpm-ostree status didn't become idle after installing wget")
logger.Infof("Checking original status")
deployment, derr := node.GetBootedOsTreeDeployment(false) // for debugging
o.Expect(derr).NotTo(o.HaveOccurred(),
"Error getting the rpm-ostree status value in node %s", node.GetName())
logger.Infof("Current status with date:\n %s", deployment)
o.Expect(node.GetCurrentBootOSImage()).To(o.Equal(initialBootedImage),
"Error! the initial osImage was not properly restored after deleting the MachineConfig")
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
f028040a-3681-4e94-bede-2cb04e466066
|
Author:sregidor-NonPreRelease-Medium-54049-[P2] Verify base images in the release image
|
['"os"', '"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_layering.go
|
g.It("Author:sregidor-NonPreRelease-Medium-54049-[P2] Verify base images in the release image", func() {
var (
oldMachineConfigOsImage = "machine-os-content"
coreExtensions = "rhel-coreos-extensions"
)
exutil.By("Extract pull-secret")
pullSecret := GetPullSecret(oc.AsAdmin())
// TODO: when the code to create a tmp directory in the beforeEach section is merged, use ExtractToDir method instead
secretExtractDir, err := pullSecret.Extract()
o.Expect(err).NotTo(o.HaveOccurred(),
"Error extracting pull-secret")
logger.Infof("Pull secret has been extracted to: %s\n", secretExtractDir)
dockerConfigFile := filepath.Join(secretExtractDir, ".dockerconfigjson")
exutil.By("Get base image for layering")
baseImage, err := getImageFromReleaseInfo(oc.AsAdmin(), LayeringBaseImageReleaseInfo, dockerConfigFile)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the base image to build new osImages")
logger.Infof("Base image: %s\n", baseImage)
exutil.By("Inspect base image information")
skopeoCLI := NewSkopeoCLI().SetAuthFile(dockerConfigFile)
inspectInfo, err := skopeoCLI.Run("inspect").Args("--tls-verify=false", "--config", "docker://"+baseImage).Output()
o.Expect(err).NotTo(o.HaveOccurred(),
"Error using 'skopeo' to inspect base image %s", baseImage)
logger.Infof("Check if image is bootable")
inspectJSON := JSON(inspectInfo)
ostreeBootable := inspectJSON.Get("config").Get("Labels").Get("ostree.bootable").ToString()
o.Expect(ostreeBootable).To(o.Equal("true"),
`The base image %s is expected to be bootable (.config.Labels.ostree\.bootable == "true"), but skopeo information says that it is not bootable. %s`,
baseImage, inspectInfo)
logger.Infof("OK!\n")
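// For reference (abbreviated, hypothetical output): `skopeo inspect --config` prints the OCI
// image config, and a bootable base image is expected to carry a label of the form
//   "config": { "Labels": { "ostree.bootable": "true", ... } }
// which is exactly what the JSON lookup above asserts.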
exutil.By("Verify that old machine config os content is not present in the release info")
mcOsIMage, _ := getImageFromReleaseInfo(oc.AsAdmin(), oldMachineConfigOsImage, dockerConfigFile)
o.Expect(mcOsIMage).To(o.ContainSubstring(`no image tag "`+oldMachineConfigOsImage+`" exists`),
"%s image should not be present in the release image, but we can find it with value %s", oldMachineConfigOsImage, mcOsIMage)
logger.Infof("OK!\n")
exutil.By("Verify that new core extensions image is present in the release info")
coreExtensionsValue, exErr := getImageFromReleaseInfo(oc.AsAdmin(), coreExtensions, dockerConfigFile)
o.Expect(exErr).NotTo(o.HaveOccurred(),
"Error getting the new core extensions image")
o.Expect(coreExtensionsValue).NotTo(o.BeEmpty(),
"%s image should be present in the release image, but it could not be found", coreExtensions)
logger.Infof("%s is present in the release info with value %s", coreExtensions, coreExtensionsValue)
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
564f0488-8e1a-4e7d-b8d5-55a35482d6d7
|
Author:sregidor-ConnectedOnly-Longduration-NonPreRelease-High-54909-[P1] Configure extensions while using a custom osImage [Disruptive]
|
['"fmt"', '"os"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_layering.go
|
g.It("Author:sregidor-ConnectedOnly-Longduration-NonPreRelease-High-54909-[P1] Configure extensions while using a custom osImage [Disruptive]", func() {
// Due to https://issues.redhat.com/browse/OCPBUGS-31255 in this test case pools will be degraded intermittently. They will be degraded and automatically fixed in a few minutes/seconds
// Because of that we need to use WaitForUpdatedStatus instead of waitForComplete, since WaitForUpdatedStatus will not fail if a pool is degraded for just a few minutes but the configuration is applied properly
architecture.SkipArchitectures(oc, architecture.MULTI, architecture.S390X, architecture.PPC64LE)
var (
rpmName = "zsh"
extensionRpmName = "usbguard"
dockerFileCommands = fmt.Sprintf(`
COPY openshift-config-user-ca-bundle.crt /etc/pki/ca-trust/source/anchors/openshift-config-user-ca-bundle.crt
RUN update-ca-trust && \
rm /etc/pki/ca-trust/source/anchors/openshift-config-user-ca-bundle.crt
RUN printf '[baseos]\nname=CentOS-$releasever - Base\nbaseurl=http://mirror.stream.centos.org/$releasever-stream/BaseOS/$basearch/os/\ngpgcheck=0\nenabled=1\nproxy='$HTTPS_PROXY'\n\n[appstream]\nname=CentOS-$releasever - AppStream\nbaseurl=http://mirror.stream.centos.org/$releasever-stream/AppStream/$basearch/os/\ngpgcheck=0\nenabled=1\nproxy='$HTTPS_PROXY'\n\n' > /etc/yum.repos.d/centos.repo && \
rpm-ostree install %s && \
rpm-ostree cleanup -m && \
ostree container commit
`, rpmName)
workerNode = NewNodeList(oc).GetAllCoreOsWokerNodesOrFail()[0]
masterNode = NewNodeList(oc).GetAllMasterNodesOrFail()[0]
wMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
mMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)
)
mMcp.SetWaitingTimeForExtensionsChange()
wMcp.SetWaitingTimeForExtensionsChange()
defer mMcp.WaitForUpdatedStatus()
defer wMcp.WaitForUpdatedStatus()
// Build the new osImage
osImageBuilder := OsImageBuilderInNode{node: workerNode, dockerFileCommands: dockerFileCommands}
defer func() { _ = osImageBuilder.CleanUp() }()
digestedImage, err := osImageBuilder.CreateAndDigestOsImage()
o.Expect(err).NotTo(o.HaveOccurred(),
"Error creating the new osImage")
logger.Infof("OK\n")
// Create MC to apply the config to worker nodes
exutil.By("Create a MC to deploy the new osImage in 'worker' pool")
wLayeringMcName := "tc-54909-layering-extensions-worker"
wLayeringMC := NewMachineConfig(oc.AsAdmin(), wLayeringMcName, MachineConfigPoolWorker)
wLayeringMC.parameters = []string{"OS_IMAGE=" + digestedImage}
wLayeringMC.skipWaitForMcp = true
defer wLayeringMC.deleteNoWait()
wLayeringMC.create()
// Create MC to apply the config to master nodes
exutil.By("Create a MC to deploy the new osImage in 'master' pool")
mLayeringMcName := "tc-54909-layering-extensions-master"
mLayeringMC := NewMachineConfig(oc.AsAdmin(), mLayeringMcName, MachineConfigPoolMaster)
mLayeringMC.parameters = []string{"OS_IMAGE=" + digestedImage}
mLayeringMC.skipWaitForMcp = true
defer mLayeringMC.deleteNoWait()
mLayeringMC.create()
// Wait for pools
o.Expect(wMcp.WaitForUpdatedStatus()).To(o.Succeed())
logger.Infof("The new osImage was deployed successfully in 'worker' pool\n")
o.Expect(mMcp.WaitForUpdatedStatus()).To(o.Succeed())
logger.Infof("The new osImage was deployed successfully in 'master' pool\n")
// Check rpm-ostree status in worker node
exutil.By("Check that the rpm-ostree status is reporting the right booted image in worker nodes")
wStatus, err := workerNode.GetRpmOstreeStatus(false)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the rpm-ostree status value in worker node %s", workerNode.GetName())
logger.Infof("Current rpm-ostree status in worker node:\n%s\n", wStatus)
wDeployment, err := workerNode.GetBootedOsTreeDeployment(true)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the rpm-ostree status value in worker node %s", workerNode.GetName())
wContainerRef, jerr := JSON(wDeployment).GetSafe("container-image-reference")
o.Expect(jerr).NotTo(o.HaveOccurred(),
"We can't get 'container-image-reference' from the deployment status in worker node. Wrong rpm-ostree status!")
o.Expect(wContainerRef.ToString()).To(o.Equal("ostree-unverified-registry:"+digestedImage),
"container reference in the worker node's status is not the expected one")
logger.Infof("OK!\n")
// Check rpm-ostree status in master node
exutil.By("Check that the rpm-ostree status is reporting the right booted image in master nodes")
mStatus, err := masterNode.GetRpmOstreeStatus(false)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the rpm-ostree status value in master node %s", masterNode.GetName())
logger.Infof("Current rpm-ostree status in master node:\n%s\n", mStatus)
mDeployment, err := masterNode.GetBootedOsTreeDeployment(true)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the rpm-ostree status value in master node %s", masterNode.GetName())
mContainerRef, jerr := JSON(mDeployment).GetSafe("container-image-reference")
o.Expect(jerr).NotTo(o.HaveOccurred(),
"We can't get 'container-image-reference' from the deployment status in master node. Wrong rpm-ostree status!")
o.Expect(mContainerRef.ToString()).To(o.Equal("ostree-unverified-registry:"+digestedImage),
"container reference in the master node's status is not the expected one")
logger.Infof("OK!\n")
// Check rpm is installed in worker node
exutil.By(fmt.Sprintf("Check that the %s rpm is installed in worker node", rpmName))
o.Expect(workerNode.RpmIsInstalled(rpmName)).
To(o.BeTrue(),
"Error. %s rpm is not installed after changing the osImage in worker node %s.", rpmName, workerNode.GetName())
logger.Infof("OK\n")
// Check rpm is installed in master node
exutil.By(fmt.Sprintf("Check that the %s rpm is installed in master node", rpmName))
o.Expect(masterNode.RpmIsInstalled(rpmName)).
To(o.BeTrue(),
"Error. %s rpm is not installed after changing the osImage in master node %s.", rpmName, masterNode.GetName())
logger.Infof("OK\n")
// Create MC to apply usbguard extension to worker nodes
exutil.By("Create a MC to deploy the usbguard extension in 'worker' pool")
wUsbguardMcName := "tc-54909-extension-usbguard-worker"
wUsbguardMC := NewMachineConfig(oc.AsAdmin(), wUsbguardMcName, MachineConfigPoolWorker).SetMCOTemplate("change-worker-extension-usbguard.yaml")
wUsbguardMC.skipWaitForMcp = true
defer wUsbguardMC.deleteNoWait()
wUsbguardMC.create()
// Create MC to apply usbguard extension to master nodes
exutil.By("Create a MC to deploy the usbguard extension in 'master' pool")
mUsbguardMcName := "tc-54909-extension-usbguard-master"
mUsbguardMC := NewMachineConfig(oc.AsAdmin(), mUsbguardMcName, MachineConfigPoolMaster).SetMCOTemplate("change-worker-extension-usbguard.yaml")
mUsbguardMC.skipWaitForMcp = true
defer mUsbguardMC.deleteNoWait()
mUsbguardMC.create()
// Wait for pools
o.Expect(wMcp.WaitForUpdatedStatus()).To(o.Succeed())
logger.Infof("The new config was applied successfully in 'worker' pool\n")
o.Expect(mMcp.WaitForUpdatedStatus()).To(o.Succeed())
logger.Infof("The new config was applied successfully in 'master' pool\n")
// Check that rpms are installed in worker node after the extension
exutil.By("Check that both rpms are installed in worker node after the extension")
o.Expect(workerNode.RpmIsInstalled(rpmName)).
To(o.BeTrue(),
"Error. %s rpm is not installed after changing the osImage in worker node %s.", rpmName, workerNode.GetName())
o.Expect(workerNode.RpmIsInstalled(extensionRpmName)).
To(o.BeTrue(),
"Error. %s rpm is not installed after changing the osImage in worker node %s.", extensionRpmName, workerNode.GetName())
logger.Infof("OK\n")
// Check that rpms are installed in master node after the extension
exutil.By("Check that both rpms are installed in master node after the extension")
o.Expect(masterNode.RpmIsInstalled(rpmName)).
To(o.BeTrue(),
"Error. %s rpm is not installed after changing the osImage in master node %s.", rpmName, masterNode.GetName())
o.Expect(masterNode.RpmIsInstalled(extensionRpmName)).
To(o.BeTrue(),
"Error. %s rpm is not installed after changing the osImage in master node %s.", extensionRpmName, masterNode.GetName())
logger.Infof("OK\n")
// Check rpm-ostree status in worker node after extension
exutil.By("Check that the rpm-ostree status is reporting the right booted image in worker nodes after the extension is installed")
wStatus, err = workerNode.GetRpmOstreeStatus(false)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the rpm-ostree status value in worker node %s after the extension is installed", workerNode.GetName())
logger.Infof("Current rpm-ostree status in worker node after extension:\n%s\n", wStatus)
o.Expect(wStatus).To(o.MatchRegexp("(?s)LayeredPackages:.*usbguard"),
"Status in worker node %s is not reporting the Layered %s package", workerNode.GetName(), extensionRpmName)
wDeployment, err = workerNode.GetBootedOsTreeDeployment(true)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the rpm-ostree status value in worker node %s after the extension is installed", workerNode.GetName())
wContainerRef, jerr = JSON(wDeployment).GetSafe("container-image-reference")
o.Expect(jerr).NotTo(o.HaveOccurred(),
"We can't get 'container-image-reference' from the deployment status in worker node after the extension is installed. Wrong rpm-ostree status!")
o.Expect(wContainerRef.ToString()).To(o.Equal("ostree-unverified-registry:"+digestedImage),
"container reference in the worker node's status is not the expected one after the extension is installed")
logger.Infof("OK!\n")
// Check rpm-ostree status in master node after the extension
exutil.By("Check that the rpm-ostree status is reporting the right booted image in master nodes after the extension is installed")
mStatus, err = masterNode.GetRpmOstreeStatus(false)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the rpm-ostree status value in master node %s after the extension is installed", masterNode.GetName())
logger.Infof("Current rpm-ostree status in master node:\n%s\n", mStatus)
o.Expect(mStatus).To(o.MatchRegexp("(?s)LayeredPackages:.*usbguard"),
"Status in master node %s is not reporting the Layered %s package", masterNode.GetName(), extensionRpmName)
mDeployment, err = masterNode.GetBootedOsTreeDeployment(true)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the rpm-ostree status value in master node %s after the extension is installed", masterNode.GetName())
mContainerRef, jerr = JSON(mDeployment).GetSafe("container-image-reference")
o.Expect(jerr).NotTo(o.HaveOccurred(),
"We can't get 'container-image-reference' from the deployment status in master node after the extension is installed. Wrong rpm-ostree status!")
o.Expect(mContainerRef.ToString()).To(o.Equal("ostree-unverified-registry:"+digestedImage),
"container reference in the master node's status is not the expected one after the extension is installed")
logger.Infof("OK!\n")
exutil.By("Remove custom layering MCs")
wLayeringMC.deleteNoWait()
mLayeringMC.deleteNoWait()
logger.Infof("OK!\n")
// Wait for pools
o.Expect(wMcp.WaitForUpdatedStatus()).To(o.Succeed())
logger.Infof("The new config was applied successfully in 'worker' pool\n")
o.Expect(mMcp.WaitForUpdatedStatus()).To(o.Succeed())
logger.Infof("The new config was applied successfully in 'master' pool\n")
// Check that extension rpm is installed in the worker node, but custom layering rpm is not
exutil.By("Check that extension rpm is installed in worker node but custom layering rpm is not")
o.Expect(workerNode.RpmIsInstalled(rpmName)).
To(o.BeFalse(),
"Error. %s rpm is installed in worker node %s but it should not be installed.", rpmName, workerNode.GetName())
o.Expect(workerNode.RpmIsInstalled(extensionRpmName)).
To(o.BeTrue(),
"Error. %s rpm is not installed after changing the osImage in worker node %s.", extensionRpmName, workerNode.GetName())
logger.Infof("OK\n")
// Check that extension rpm is installed in the master node, but custom layering rpm is not
exutil.By("Check that extension rpm is installed in master node but custom layering rpm is not")
o.Expect(masterNode.RpmIsInstalled(rpmName)).
To(o.BeFalse(),
"Error. %s rpm is installed in master node %s but it should not be installed.", rpmName, masterNode.GetName())
o.Expect(masterNode.RpmIsInstalled(extensionRpmName)).
To(o.BeTrue(),
"Error. %s rpm is not installed after changing the osImage in master node %s.", extensionRpmName, masterNode.GetName())
logger.Infof("OK\n")
// Check rpm-ostree status in worker node after deleting custom osImage
exutil.By("Check that the rpm-ostree status is reporting the right booted image in worker nodes after deleting custom osImage")
wStatus, err = workerNode.GetRpmOstreeStatus(false)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the rpm-ostree status value in worker node %s after deleting custom osImage", workerNode.GetName())
logger.Infof("Current rpm-ostree status in worker node after deleting custom osImage:\n%s\n", wStatus)
o.Expect(wStatus).To(o.MatchRegexp("(?s)LayeredPackages:.*usbguard"),
"Status in worker node %s is not reporting the Layered %s package after deleting custom osImage", workerNode.GetName(), extensionRpmName)
o.Expect(wStatus).NotTo(o.ContainSubstring(digestedImage),
"Status in worker node %s is reporting the custom osImage, but it shouldn't because custom osImage was deleted", workerNode.GetName())
logger.Infof("OK!\n")
// Check rpm-ostree status in master node after deleting custom osImage
exutil.By("Check that the rpm-ostree status is reporting the right booted image in master nodes after deleting custom osImage")
mStatus, err = masterNode.GetRpmOstreeStatus(false)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the rpm-ostree status value in master node %s after deleting custom osImage", masterNode.GetName())
logger.Infof("Current rpm-ostree status in master node:\n%s\n", mStatus)
o.Expect(mStatus).To(o.MatchRegexp("(?s)LayeredPackages:.*usbguard"),
"Status in master node %s is not reporting the Layered %s package after deleting custom osImage", masterNode.GetName(), extensionRpmName)
o.Expect(mStatus).NotTo(o.ContainSubstring(digestedImage),
"Status in master node %s is reporting the custom osImage, but it shouldn't because custom osImage was deleted", masterNode.GetName())
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
0d03da5c-8567-4958-acfb-95930c524f13
|
Author:sregidor-ConnectedOnly-Longduration-NonPreRelease-High-54915-[P1] Configure kerneltype while using a custom osImage [Disruptive]
|
['"fmt"', '"os"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_layering.go
|
g.It("Author:sregidor-ConnectedOnly-Longduration-NonPreRelease-High-54915-[P1] Configure kerneltype while using a custom osImage [Disruptive]", func() {
// Due to https://issues.redhat.com/browse/OCPBUGS-31255 the pools in this test case will be degraded intermittently. They get degraded and are automatically fixed again within a few seconds/minutes
// Because of that we need to use WaitForUpdatedStatus instead of waitForComplete, since WaitForUpdatedStatus does not fail if a pool is degraded for just a few minutes as long as the configuration is applied properly
architecture.SkipArchitectures(oc, architecture.MULTI, architecture.S390X, architecture.PPC64LE, architecture.ARM64)
skipTestIfSupportedPlatformNotMatched(oc, AWSPlatform, GCPPlatform)
var (
rpmName = "zsh"
dockerFileCommands = fmt.Sprintf(`
RUN printf '[baseos]\nname=CentOS-$releasever - Base\nbaseurl=http://mirror.stream.centos.org/$releasever-stream/BaseOS/$basearch/os/\ngpgcheck=0\nenabled=1\nproxy='$HTTPS_PROXY'\n\n[appstream]\nname=CentOS-$releasever - AppStream\nbaseurl=http://mirror.stream.centos.org/$releasever-stream/AppStream/$basearch/os/\ngpgcheck=0\nenabled=1\nproxy='$HTTPS_PROXY'\n\n' > /etc/yum.repos.d/centos.repo && \
rpm-ostree install %s && \
rpm-ostree cleanup -m && \
ostree container commit
`, rpmName)
rtMcTemplate = "set-realtime-kernel.yaml"
workerNode = NewNodeList(oc).GetAllCoreOsWokerNodesOrFail()[0]
masterNode = NewNodeList(oc).GetAllMasterNodesOrFail()[0]
wMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
mMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)
)
mMcp.SetWaitingTimeForKernelChange()
wMcp.SetWaitingTimeForKernelChange()
defer mMcp.WaitForUpdatedStatus()
defer wMcp.WaitForUpdatedStatus()
// Create a MC to use realtime kernel in the worker pool
exutil.By("Create machine config to enable RT kernel in worker pool")
wRtMcName := "50-realtime-kernel-worker"
wRtMc := NewMachineConfig(oc.AsAdmin(), wRtMcName, MachineConfigPoolWorker).SetMCOTemplate(rtMcTemplate)
wRtMc.skipWaitForMcp = true
defer wRtMc.deleteNoWait()
// TODO: When we extract the "mcp.waitForComplete" from the "create" method, we need to take into account that if
// we are configuring a rt-kernel we need to wait longer.
wRtMc.create()
logger.Infof("OK!\n")
// Create a MC to use realtime kernel in the master pool
exutil.By("Create machine config to enable RT kernel in master pool")
mRtMcName := "50-realtime-kernel-master"
mRtMc := NewMachineConfig(oc.AsAdmin(), mRtMcName, MachineConfigPoolMaster).SetMCOTemplate(rtMcTemplate)
mRtMc.skipWaitForMcp = true
defer mRtMc.deleteNoWait()
mRtMc.create()
logger.Infof("OK!\n")
// Wait for the pools to be updated
exutil.By("Wait for pools to be updated after applying the new realtime kernel")
o.Expect(wMcp.WaitForUpdatedStatus()).To(o.Succeed())
o.Expect(mMcp.WaitForUpdatedStatus()).To(o.Succeed())
logger.Infof("OK!\n")
// Check that realtime kernel is active in worker nodes
exutil.By("Check realtime kernel in worker nodes")
o.Expect(workerNode.IsRealTimeKernel()).Should(o.BeTrue(),
"Kernel is not realtime kernel in worker node %s", workerNode.GetName())
logger.Infof("OK!\n")
// Check that realtime kernel is active in master nodes
exutil.By("Check realtime kernel in master nodes")
o.Expect(masterNode.IsRealTimeKernel()).Should(o.BeTrue(),
"Kernel is not realtime kernel in master node %s", masterNode.GetName())
logger.Infof("OK!\n")
// Build the new osImage
exutil.By("Build a custom osImage")
osImageBuilder := OsImageBuilderInNode{node: workerNode, dockerFileCommands: dockerFileCommands}
digestedImage, err := osImageBuilder.CreateAndDigestOsImage()
o.Expect(err).NotTo(o.HaveOccurred(),
"Error creating the new osImage")
logger.Infof("OK\n")
// Create MC to apply the config to worker nodes
exutil.By("Create a MC to deploy the new osImage in 'worker' pool")
wLayeringMcName := "tc-54915-layering-kerneltype-worker"
wLayeringMC := NewMachineConfig(oc.AsAdmin(), wLayeringMcName, MachineConfigPoolWorker)
wLayeringMC.parameters = []string{"OS_IMAGE=" + digestedImage}
wLayeringMC.skipWaitForMcp = true
defer wLayeringMC.deleteNoWait()
wLayeringMC.create()
logger.Infof("OK!\n")
// Create MC to apply the config to master nodes
exutil.By("Create a MC to deploy the new osImage in 'master' pool")
mLayeringMcName := "tc-54915-layering-kerneltype-master"
mLayeringMC := NewMachineConfig(oc.AsAdmin(), mLayeringMcName, MachineConfigPoolMaster)
mLayeringMC.parameters = []string{"OS_IMAGE=" + digestedImage}
mLayeringMC.skipWaitForMcp = true
defer mLayeringMC.deleteNoWait()
mLayeringMC.create()
logger.Infof("OK!\n")
// Wait for the pools to be updated
exutil.By("Wait for pools to be updated after applying the new osImage")
o.Expect(wMcp.WaitForUpdatedStatus()).To(o.Succeed())
o.Expect(mMcp.WaitForUpdatedStatus()).To(o.Succeed())
logger.Infof("OK!\n")
// Check rpm is installed in worker node
exutil.By("Check that the rpm is installed in worker node")
o.Expect(workerNode.RpmIsInstalled(rpmName)).
To(o.BeTrue(),
"Error. %s rpm is not installed after changing the osImage in worker node %s.", rpmName, workerNode.GetName())
wStatus, err := workerNode.GetRpmOstreeStatus(false)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the rpm-ostree status value in worker node %s", masterNode.GetName())
o.Expect(wStatus).Should(o.And(
o.MatchRegexp("(?s)LayeredPackages:.*kernel-rt-core"),
o.MatchRegexp("(?s)LayeredPackages:.*kernel-rt-kvm"),
o.MatchRegexp("(?s)LayeredPackages:.*kernel-rt-modules"),
o.MatchRegexp("(?s)LayeredPackages:.*kernel-rt-modules-extra"),
o.MatchRegexp("(?s)RemovedBasePackages:.*kernel-core"),
o.MatchRegexp("(?s)RemovedBasePackages:.*kernel-modules"),
o.MatchRegexp("(?s)RemovedBasePackages:.*kernel"),
o.MatchRegexp("(?s)RemovedBasePackages:.*kernel-modules-extra")),
"rpm-ostree status is not reporting the kernel layered packages properly")
logger.Infof("OK\n")
// Check rpm is installed in master node
exutil.By("Check that the rpm is installed in master node")
o.Expect(masterNode.RpmIsInstalled(rpmName)).
To(o.BeTrue(),
"Error. %s rpm is not installed after changing the osImage in master node %s.", rpmName, workerNode.GetName())
mStatus, err := masterNode.GetRpmOstreeStatus(false)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the rpm-ostree status value in master node %s", masterNode.GetName())
o.Expect(mStatus).Should(o.And(
o.MatchRegexp("(?s)LayeredPackages:.*kernel-rt-core"),
o.MatchRegexp("(?s)LayeredPackages:.*kernel-rt-kvm"),
o.MatchRegexp("(?s)LayeredPackages:.*kernel-rt-modules"),
o.MatchRegexp("(?s)LayeredPackages:.*kernel-rt-modules-extra"),
o.MatchRegexp("(?s)RemovedBasePackages: .*kernel-core"),
o.MatchRegexp("(?s)RemovedBasePackages: .*kernel-modules"),
o.MatchRegexp("(?s)RemovedBasePackages: .*kernel"),
o.MatchRegexp("(?s)RemovedBasePackages: .*kernel-modules-extra")),
"rpm-ostree status is not reporting the kernel layered packages properly")
logger.Infof("OK\n")
// Check that realtime kernel is active in worker nodes
exutil.By("Check realtime kernel in worker nodes")
o.Expect(workerNode.IsRealTimeKernel()).Should(o.BeTrue(),
"Kernel is not realtime kernel in worker node %s", workerNode.GetName())
logger.Infof("OK!\n")
// Check that realtime kernel is active in master nodes
exutil.By("Check realtime kernel in master nodes")
o.Expect(masterNode.IsRealTimeKernel()).Should(o.BeTrue(),
"Kernel is not realtime kernel in master node %s", masterNode.GetName())
logger.Infof("OK!\n")
// Delete realtime configs
exutil.By("Delete the realtime kernel MCs")
wRtMc.deleteNoWait()
mRtMc.deleteNoWait()
logger.Infof("OK!\n")
// Wait for the pools to be updated
exutil.By("Wait for pools to be updated after deleting the realtime kernel configs")
o.Expect(wMcp.WaitForUpdatedStatus()).To(o.Succeed())
o.Expect(mMcp.WaitForUpdatedStatus()).To(o.Succeed())
logger.Infof("OK!\n")
// Check that realtime kernel is not active in worker nodes anymore
exutil.By("Check realtime kernel in worker nodes")
o.Expect(workerNode.IsRealTimeKernel()).Should(o.BeFalse(),
"Realtime kernel should not be active anymore in worker node %s", workerNode.GetName())
logger.Infof("OK!\n")
// Check that realtime kernel is not active in master nodes anymore
exutil.By("Check realtime kernel in master nodes")
o.Expect(masterNode.IsRealTimeKernel()).Should(o.BeFalse(),
"Realtime kernel should not be active anymore in master node %s", masterNode.GetName())
logger.Infof("OK!\n")
// Check rpm is installed in worker node
exutil.By("Check that the rpm is installed in worker node")
o.Expect(workerNode.RpmIsInstalled(rpmName)).
To(o.BeTrue(),
"Error. %s rpm is not installed after changing the osImage in worker node %s.", rpmName, workerNode.GetName())
wStatus, err = workerNode.GetRpmOstreeStatus(false)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the rpm-ostree status value in worker node %s", masterNode.GetName())
o.Expect(wStatus).ShouldNot(o.And(
o.ContainSubstring("LayeredPackages"),
o.ContainSubstring("RemovedBasePackages")),
"rpm-ostree status is not reporting the kernel layered packages properly in worker node %s", workerNode.GetName())
logger.Infof("OK\n")
// Check rpm is installed in master node
exutil.By("Check that the rpm is installed in master node")
o.Expect(masterNode.RpmIsInstalled(rpmName)).
To(o.BeTrue(),
"Error. %s rpm is not installed after changing the osImage in master node %s.", rpmName, workerNode.GetName())
mStatus, err = masterNode.GetRpmOstreeStatus(false)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the rpm-ostree status value in worker node %s", masterNode.GetName())
o.Expect(mStatus).ShouldNot(o.And(
o.ContainSubstring("LayeredPackages"),
o.ContainSubstring("RemovedBasePackages")),
"rpm-ostree status is not reporting the kernel layered packages properly in master node %s", workerNode.GetName())
logger.Infof("OK\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
7d11b0d9-43ed-406a-a4f7-506b36d85f97
|
Author:sregidor-ConnectedOnly-Longduration-NonPreRelease-Medium-55002-[P2] Get OSImageURL override related metric data available in telemetry [Disruptive]
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_layering.go
|
g.It("Author:sregidor-ConnectedOnly-Longduration-NonPreRelease-Medium-55002-[P2] Get OSImageURL override related metric data available in telemetry [Disruptive]", func() {
// Due to https://issues.redhat.com/browse/OCPBUGS-31255 the pools in this test case will be degraded intermittently. They get degraded and are automatically fixed again within a few seconds/minutes
// Because of that we need to use WaitForUpdatedStatus instead of waitForComplete, since WaitForUpdatedStatus does not fail if a pool is degraded for just a few minutes as long as the configuration is applied properly
var (
osImageURLOverrideQuery = `os_image_url_override`
dockerFileCommands = "RUN touch /etc/hello-world-file"
workerNode = NewNodeList(oc).GetAllCoreOsWokerNodesOrFail()[0]
wMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
mMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)
)
exutil.By("Check that the metric is exposed to telemetry")
expectedExposedMetric := fmt.Sprintf(`{__name__=\"%s:sum\"}`, osImageURLOverrideQuery)
telemetryConfig := NewNamespacedResource(oc.AsAdmin(), "Configmap", "openshift-monitoring", "telemetry-config")
o.Expect(telemetryConfig.Get(`{.data}`)).To(o.ContainSubstring(expectedExposedMetric),
"Metric %s, is not exposed to telemetry", osImageURLOverrideQuery)
exutil.By("Validating initial os_image_url_override values")
mon, err := exutil.NewPrometheusMonitor(oc.AsAdmin())
o.Expect(err).NotTo(o.HaveOccurred(),
"Error creating new thanos monitor")
osImageOverride, err := mon.SimpleQuery(osImageURLOverrideQuery)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error querying metric: %s", osImageURLOverrideQuery)
// Here we are logging both master and worker pools
logger.Infof("Initial %s query: %s", osImageURLOverrideQuery, osImageOverride)
logger.Infof("Validate worker pool's %s value", osImageURLOverrideQuery)
o.Expect(wMcp.GetReportedOsImageOverrideValue()).To(o.Equal("0"),
"Worker pool's %s initial value should be 0. Instead reported metric is: %s",
osImageURLOverrideQuery, osImageOverride)
logger.Infof("Validate master pool's %s value", osImageURLOverrideQuery)
o.Expect(mMcp.GetReportedOsImageOverrideValue()).To(o.Equal("0"),
"Master pool's %s initial value should be 0. Instead reported metric is: %s",
osImageURLOverrideQuery, osImageOverride)
logger.Infof("OK!\n")
// Build the new osImage
exutil.By("Build a custom osImage")
osImageBuilder := OsImageBuilderInNode{node: workerNode, dockerFileCommands: dockerFileCommands}
digestedImage, err := osImageBuilder.CreateAndDigestOsImage()
o.Expect(err).NotTo(o.HaveOccurred(),
"Error creating the new osImage")
logger.Infof("OK\n")
// Create MC to apply the config to worker nodes
exutil.By("Create a MC to deploy the new osImage in 'worker' pool")
wLayeringMcName := "tc-55002-layering-telemetry-worker"
wLayeringMC := NewMachineConfig(oc.AsAdmin(), wLayeringMcName, MachineConfigPoolWorker)
wLayeringMC.parameters = []string{"OS_IMAGE=" + digestedImage}
wLayeringMC.skipWaitForMcp = true
defer mMcp.WaitForUpdatedStatus()
defer wMcp.WaitForUpdatedStatus()
defer wLayeringMC.deleteNoWait()
wLayeringMC.create()
logger.Infof("OK!\n")
// Create MC to apply the config to master nodes
exutil.By("Create a MC to deploy the new osImage in 'master' pool")
mLayeringMcName := "tc-55002-layering-telemetry-master"
mLayeringMC := NewMachineConfig(oc.AsAdmin(), mLayeringMcName, MachineConfigPoolMaster)
mLayeringMC.parameters = []string{"OS_IMAGE=" + digestedImage}
mLayeringMC.skipWaitForMcp = true
defer mLayeringMC.deleteNoWait()
mLayeringMC.create()
logger.Infof("OK!\n")
// Wait for the pools to be updated
exutil.By("Wait for pools to be updated after applying the new osImage")
o.Expect(wMcp.WaitForUpdatedStatus()).To(o.Succeed())
o.Expect(mMcp.WaitForUpdatedStatus()).To(o.Succeed())
logger.Infof("OK!\n")
exutil.By("Validating os_image_url_override values with overridden master and worker pools")
osImageOverride, err = mon.SimpleQuery(osImageURLOverrideQuery)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error querying metric: %s", osImageURLOverrideQuery)
// Here we are logging both master and worker pools
logger.Infof("Executed %s query: %s", osImageURLOverrideQuery, osImageOverride)
logger.Infof("Validate worker pool's %s value", osImageURLOverrideQuery)
o.Expect(wMcp.GetReportedOsImageOverrideValue()).To(o.Equal("1"),
"Worker pool's %s value with overridden master and worker pools should be 1. Instead reported metric is: %s",
osImageURLOverrideQuery, osImageOverride)
logger.Infof("Validate master pool's %s value", osImageURLOverrideQuery)
o.Expect(mMcp.GetReportedOsImageOverrideValue()).To(o.Equal("1"),
"Master pool's %s value with overridden master and worker pools should be 1. Instead reported metric is: %s",
osImageURLOverrideQuery, osImageOverride)
logger.Infof("OK!\n")
exutil.By("Delete the MC that overrides worker pool's osImage and wait for the pool to be updated")
wLayeringMC.deleteNoWait()
o.Expect(wMcp.WaitForUpdatedStatus()).To(o.Succeed())
logger.Infof("OK!\n")
exutil.By("Validating os_image_url_override values with overridden master pool only")
osImageOverride, err = mon.SimpleQuery(osImageURLOverrideQuery)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error querying metric: %s", osImageURLOverrideQuery)
// Here we are logging both master and worker pools
logger.Infof("Executed %s query: %s", osImageURLOverrideQuery, osImageOverride)
logger.Infof("Validate worker pool's %s value", osImageURLOverrideQuery)
o.Expect(wMcp.GetReportedOsImageOverrideValue()).To(o.Equal("0"),
"Worker pool's %s value should be 0 when only the master pool is overridden. Instead reported metric is: %s",
osImageURLOverrideQuery, osImageOverride)
logger.Infof("Validate master pool's %s value", osImageURLOverrideQuery)
o.Expect(mMcp.GetReportedOsImageOverrideValue()).To(o.Equal("1"),
"Master pool's %s value should be 1 when only the master pool is overridden. Instead reported metric is: %s",
osImageURLOverrideQuery, osImageOverride)
logger.Infof("OK!\n")
exutil.By("Delete the MC that overrides master pool's osImage and wait for the pool to be updated")
mLayeringMC.deleteNoWait()
o.Expect(mMcp.WaitForUpdatedStatus()).To(o.Succeed())
logger.Infof("OK!\n")
exutil.By("Validating os_image_url_override when no pool is overridden")
osImageOverride, err = mon.SimpleQuery(osImageURLOverrideQuery)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error querying metric: %s", osImageURLOverrideQuery)
// Here we are logging both master and worker pools
logger.Infof("Executed %s query: %s", osImageURLOverrideQuery, osImageOverride)
logger.Infof("Validate worker pool's %s value", osImageURLOverrideQuery)
o.Expect(wMcp.GetReportedOsImageOverrideValue()).To(o.Equal("0"),
"Worker pool's %s value should be 0 when no pool is overridden. Instead reported metric is: %s",
osImageURLOverrideQuery, osImageOverride)
logger.Infof("Validate master pool's %s value", osImageURLOverrideQuery)
o.Expect(mMcp.GetReportedOsImageOverrideValue()).To(o.Equal("0"),
"Master pool's %s value should be 0 when no pool is overridden. Instead reported metric is: %s",
osImageURLOverrideQuery, osImageOverride)
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
7f3ded57-2e84-42dd-8b1e-5d9357fe5f6d
|
Author:sregidor-ConnectedOnly-Longduration-NonPreRelease-Medium-54056-Update osImage using the internal registry to store the image [Disruptive]
|
['"fmt"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_layering.go
|
g.It("Author:sregidor-ConnectedOnly-Longduration-NonPreRelease-Medium-54056-Update osImage using the internal registry to store the image [Disruptive]", func() {
var (
osImageNewFilePath = "/etc/hello-tc-54056"
dockerFileCommands = fmt.Sprintf(`
RUN touch %s
`, osImageNewFilePath)
mMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)
)
SkipTestIfCannotUseInternalRegistry(oc.AsAdmin())
architecture.SkipArchitectures(oc, architecture.MULTI, architecture.S390X, architecture.PPC64LE)
// Select the nodes
builderNode := mMcp.GetNodesOrFail()[0] // We always build the image in a master node to make sure it is CoreOs
// We test the image in a compact/sno compatible pool
mcp := GetCompactCompatiblePool(oc.AsAdmin())
if len(mcp.GetCoreOsNodesOrFail()) == 0 {
logger.Infof("The worker pool has no CoreOs nodes, we will use master pool for testing the osImage")
mcp = mMcp
}
node := mcp.GetCoreOsNodesOrFail()[0]
logger.Infof("Using pool %s and node %s for testing", mcp.GetName(), node.GetName())
// Build the new osImage
osImageBuilder := OsImageBuilderInNode{node: builderNode, dockerFileCommands: dockerFileCommands}
osImageBuilder.UseInternalRegistry = true
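// UseInternalRegistry makes the builder push the resulting osImage to the cluster's internal image registry instead of an external one.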
defer func() { _ = osImageBuilder.CleanUp() }()
digestedImage, err := osImageBuilder.CreateAndDigestOsImage()
o.Expect(err).NotTo(o.HaveOccurred(),
"Error creating the new osImage")
logger.Infof("Digested Image: %s", digestedImage)
logger.Infof("OK\n")
// Create MC and wait for MCP
exutil.By("Create a MC to deploy the new osImage")
layeringMcName := "layering-mc"
layeringMC := NewMachineConfig(oc.AsAdmin(), layeringMcName, mcp.GetName())
layeringMC.parameters = []string{"OS_IMAGE=" + digestedImage}
defer layeringMC.delete()
layeringMC.create()
mcp.waitForComplete()
logger.Infof("The new osImage was deployed successfully\n")
logger.Infof("OK\n")
// Check image content
exutil.By("Load remote resources to verify that the osImage content has been deployed properly")
tc54056File := NewRemoteFile(node, osImageNewFilePath)
o.Expect(tc54056File.Exists()).To(o.BeTrue(),
"The file %s included in the osImage should exist in the node %s, but it does not", osImageNewFilePath, node.GetName())
o.Expect(tc54056File.Fetch()).To(o.Succeed(),
"The content of file %s could not be retreived from node %s", osImageNewFilePath, node.GetName())
o.Expect(tc54056File.GetTextContent()).To(o.BeEmpty(),
"The file %s should be empty, but it is not. Current content: %s", osImageNewFilePath, tc54056File.GetTextContent())
logger.Infof("OK\n")
// Delete the MC and wait for MCP
exutil.By("Delete the MC so that the original osImage is restored")
layeringMC.delete()
mcp.waitForComplete()
logger.Infof("MC was successfully deleted\n")
logger.Infof("OK\n")
exutil.By("Check that the included new content is not present anymore")
o.Expect(tc54056File.Exists()).To(o.BeFalse(),
"The file %s included in the osImage should exist in the node %s, but it does not", osImageNewFilePath, node.GetName())
logger.Infof("OK\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
55980cd3-769e-40cc-b7fd-133e9efd7a05
|
Author:sregidor-ConnectedOnly-Longduration-NonPreRelease-High-67789-[P1] Configure 64k-pages kerneltype while using a custom osImage [Disruptive]
|
['"fmt"', '"os"', '"github.com/openshift/openshift-tests-private/test/extended/util/architecture"', 'clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_layering.go
|
g.It("Author:sregidor-ConnectedOnly-Longduration-NonPreRelease-High-67789-[P1] Configure 64k-pages kerneltype while using a custom osImage [Disruptive]", func() {
var (
mcTemplate64k = "set-64k-pages-kernel.yaml"
rpmName = "zsh"
dockerFileCommands = fmt.Sprintf(`
RUN printf '[baseos]\nname=CentOS-$releasever - Base\nbaseurl=http://mirror.stream.centos.org/$releasever-stream/BaseOS/$basearch/os/\ngpgcheck=0\nenabled=1\nproxy='$HTTPS_PROXY'\n\n[appstream]\nname=CentOS-$releasever - AppStream\nbaseurl=http://mirror.stream.centos.org/$releasever-stream/AppStream/$basearch/os/\ngpgcheck=0\nenabled=1\nproxy='$HTTPS_PROXY'\n\n' > /etc/yum.repos.d/centos.repo && \
rpm-ostree install %s && \
rpm-ostree cleanup -m && \
ostree container commit
`, rpmName)
)
architecture.SkipIfNoNodeWithArchitectures(oc.AsAdmin(), architecture.ARM64)
clusterinfra.SkipTestIfNotSupportedPlatform(oc.AsAdmin(), clusterinfra.GCP)
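// Create a temporary custom MCP containing a single arm64 node, so that the 64k-pages kernel and the custom osImage are only applied to that node.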
createdCustomPoolName := fmt.Sprintf("mco-test-%s", architecture.ARM64)
defer DeleteCustomMCP(oc.AsAdmin(), createdCustomPoolName)
mcp, nodes := GetPoolAndNodesForArchitectureOrFail(oc.AsAdmin(), createdCustomPoolName, architecture.ARM64, 1)
node := nodes[0]
mcp.SetWaitingTimeForKernelChange() // Increase waiting time
// Create a MC to use 64k-pages kernel
exutil.By("Create machine config to enable 64k-pages kernel")
mcName64k := fmt.Sprintf("tc-67789-64k-pages-kernel-%s", mcp.GetName())
mc64k := NewMachineConfig(oc.AsAdmin(), mcName64k, mcp.GetName()).SetMCOTemplate(mcTemplate64k)
defer mc64k.delete()
mc64k.create()
logger.Infof("OK!\n")
// Check that 64k-pages kernel is active
exutil.By("Check 64k-pages kernel")
o.Expect(node.Is64kPagesKernel()).Should(o.BeTrue(),
"Kernel is not 64k-pages kernel in node %s", node.GetName())
logger.Infof("OK!\n")
// Build the new osImage
exutil.By("Build a custom osImage")
osImageBuilder := OsImageBuilderInNode{node: node, dockerFileCommands: dockerFileCommands}
digestedImage, err := osImageBuilder.CreateAndDigestOsImage()
o.Expect(err).NotTo(o.HaveOccurred(),
"Error creating the new osImage")
logger.Infof("OK\n")
// Create a MC to apply the config
exutil.By("Create a MC to deploy the new osImage")
layeringMcName := fmt.Sprintf("tc-67789-layering-64kpages-%s", mcp.GetName())
layeringMC := NewMachineConfig(oc.AsAdmin(), layeringMcName, mcp.GetName())
layeringMC.parameters = []string{"OS_IMAGE=" + digestedImage}
layeringMC.skipWaitForMcp = true
defer layeringMC.deleteNoWait()
layeringMC.create()
mcp.waitForComplete()
logger.Infof("OK!\n")
// Check that the expected (zsh+64k-pages kernel) rpms are installed
exutil.By("Check that all the expected rpms are installed")
o.Expect(
node.RpmIsInstalled(rpmName),
).To(o.BeTrue(),
"Error. %s rpm is not installed after changing the osImage in node %s.", rpmName, node.GetName())
o.Expect(
node.GetRpmOstreeStatus(false),
).Should(o.And(
o.MatchRegexp("(?s)LayeredPackages:.*kernel-64k-core"),
o.MatchRegexp("(?s)LayeredPackages:.*kernel-64k-modules"),
o.MatchRegexp("(?s)LayeredPackages:.*kernel-64k-modules-extra"),
o.MatchRegexp("(?s)RemovedBasePackages:.*kernel-core"),
o.MatchRegexp("(?s)RemovedBasePackages:.*kernel-modules"),
o.MatchRegexp("(?s)RemovedBasePackages:.*kernel "),
o.MatchRegexp("(?s)RemovedBasePackages:.*kernel-modules-extra")),
"rpm-ostree status is not reporting the kernel layered packages properly")
logger.Infof("OK\n")
// Check that 64k-pages kernel is active
exutil.By("Check 64k-pages kernel")
o.Expect(node.Is64kPagesKernel()).Should(o.BeTrue(),
"Kernel is not 64k-pages kernel in node %s", node.GetName())
logger.Infof("OK!\n")
// Delete 64k-pages config
exutil.By("Delete the 64k-pages kernel MC")
mc64k.delete()
logger.Infof("OK!\n")
// Check that 64k-pages kernel is not installed anymore
exutil.By("Check that 64k-pages kernel is not installed anymore")
o.Expect(node.Is64kPagesKernel()).Should(o.BeFalse(),
"Huge pages kernel should not be installed anymore in node %s", node.GetName())
logger.Infof("OK!\n")
// Check zsh rpm is installed
exutil.By("Check that the zsh rpm is still installed after we removed the 64k-pages kernel")
o.Expect(node.RpmIsInstalled(rpmName)).
To(o.BeTrue(),
"Error. %s rpm is not installed after changing the osImage in node %s.", rpmName, node.GetName())
o.Expect(
node.GetRpmOstreeStatus(false),
).ShouldNot(o.And(
o.ContainSubstring("LayeredPackages"),
o.ContainSubstring("RemovedBasePackages")),
"rpm-ostree status is not reporting the layered packages properly in node %s", node.GetName())
logger.Infof("OK\n")
})
| |||||
test
|
openshift/openshift-tests-private
|
e440f4dd-c4d2-4b6c-bd9e-cd1167b778e0
|
mco_machineconfignode
|
import (
"fmt"
"strconv"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
)
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_machineconfignode.go
|
package mco
import (
"fmt"
"strconv"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
)
var _ = g.Describe("[sig-mco] MCO MachineConfigNode", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("mco-machineconfignode", exutil.KubeConfigPath())
)
g.JustBeforeEach(func() {
preChecks(oc)
// featureGate MachineConfigNode is included in featureSet: TechPreviewNoUpgrade
// skip the test if that featureSet is not enabled
if !exutil.IsTechPreviewNoUpgrade(oc) {
g.Skip("featureSet: TechPreviewNoUpgrade is required for this test")
}
})
g.It("Author:rioliu-NonPreRelease-Critical-69184-[P1][OnCLayer] Enable feature gate MachineConfigNodes [Serial]", func() {
// need to check whether featureGate MachineConfigNodes is in enabled list
exutil.By("Check whether featureGate: MachineConfigNodes is enabled")
featureGate := NewResource(oc.AsAdmin(), "featuregate", "cluster")
enabled := featureGate.GetOrFail(`{.status.featureGates[*].enabled}`)
logger.Infof("enabled featuregates: %s", enabled)
o.Expect(enabled).Should(o.ContainSubstring("MachineConfigNodes"), "featureGate: MachineConfigNodes cannot be found")
})
g.It("Author:rioliu-NonPreRelease-High-69187-[P2][OnCLayer] validate MachineConfigNodes properties [Serial]", func() {
nodes := NewNodeList(oc.AsAdmin()).GetAllLinuxWorkerNodesOrFail()
for _, node := range nodes {
mcn := NewMachineConfigNode(oc.AsAdmin(), node.GetName())
exutil.By(fmt.Sprintf("Check MachineConfigNode properties for %s", node.GetName()))
logger.Infof("Check spec.configVersion.desired")
desiredOfNode := node.GetDesiredMachineConfig()
desiredOfMCNSpec := mcn.GetDesiredMachineConfigOfSpec()
o.Expect(desiredOfNode).Should(o.Equal(desiredOfMCNSpec), "desired config of node is not same as machineconfignode.spec")
logger.Infof("Check spec.pool")
poolOfNode := node.GetPrimaryPoolOrFail().GetName()
poolOfMCNSpec := mcn.GetPool()
o.Expect(poolOfNode).Should(o.Equal(poolOfMCNSpec), "pool of node is not same as machineconfignode.spec")
logger.Infof("Check spec.node")
nodeOfMCNSpec := mcn.GetNode()
o.Expect(node.GetName()).Should(o.Equal(nodeOfMCNSpec), "node name is not same as machineconfignode.spec")
logger.Infof("Check status.configVersion.current")
currentOfNode := node.GetCurrentMachineConfig()
currentOfMCNStatus := mcn.GetCurrentMachineConfigOfStatus()
o.Expect(currentOfNode).Should(o.Equal(currentOfMCNStatus), "current config of node is not same as machineconfignode.status")
logger.Infof("Check status.configVersion.desired")
desiredOfMCNStatus := mcn.GetDesiredMachineConfigOfStatus()
o.Expect(desiredOfNode).Should(o.Equal(desiredOfMCNStatus), "desired config of node is not same as machineconfignode.status")
logger.Infof("OK\n")
}
})
g.It("Author:rioliu-NonPreRelease-Longduration-High-69197-[OnCLayer] validate MachineConfigNode condition status transition [Disruptive]", func() {
var (
mcName = "create-test-file"
fileConfig = getURLEncodedFileConfig("/etc/test-file", "hello", "420")
workerMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
)
// create machine config to apply a file change
exutil.By("Create a test file on node")
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker)
mc.SetMCOTemplate(GenericMCTemplate)
mc.SetParams(fmt.Sprintf("FILES=[%s]", fileConfig))
mc.skipWaitForMcp = true
defer mc.delete()
mc.create()
exutil.By("Check conidition status of MachineConfigNode")
// get 1st updating worker nodes
workerNode := workerMcp.GetCordonedNodes()[0]
// if the test fails, we need to wait for the mcp to complete and then roll back the change
defer workerMcp.waitForComplete()
mcn := NewMachineConfigNode(oc.AsAdmin(), workerNode.GetName())
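// The MachineConfigNode conditions are expected to transition through the whole node update lifecycle:
// UpdatePrepared/UpdateCompatible -> Cordoned -> Drained -> AppliedFilesAndOS -> UpdateExecuted -> RebootedNode -> Resumed -> UpdateComplete -> Uncordoned -> Updated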
logger.Infof("Checking Updated False")
o.Eventually(mcn.GetUpdated, "1m", "5s").Should(o.Equal("False"))
logger.Infof("Checking UpdatePrepared")
o.Eventually(mcn.GetUpdatePrepared, "1m", "3s").Should(o.Equal("True"))
logger.Infof("Checking UpdateCompatible")
o.Eventually(mcn.GetUpdateCompatible, "3m", "3s").Should(o.Equal("True"))
logger.Infof("Checking UpdateExecuted Unknown")
o.Eventually(mcn.GetUpdateExecuted, "1m", "3s").Should(o.Equal("Unknown"))
logger.Infof("Checking Cordoned")
o.Eventually(mcn.GetCordoned, "30s", "3s").Should(o.Equal("True"))
logger.Infof("Checking Drained Unknown")
o.Eventually(mcn.GetDrained, "30s", "2s").Should(o.Equal("Unknown"))
logger.Infof("Checking Drained")
o.Eventually(mcn.GetDrained, "5m", "2s").Should(o.Equal("True"))
logger.Infof("Checking AppliedFilesAndOS Unknown")
o.Eventually(mcn.GetAppliedFilesAndOS, "1m", "1s").Should(o.Equal("Unknown"))
logger.Infof("Checking AppliedFilesAndOS")
o.Eventually(mcn.GetAppliedFilesAndOS, "3m", "2s").Should(o.Equal("True"))
logger.Infof("Checking UpdateExecuted")
o.Eventually(mcn.GetUpdateExecuted, "20s", "5s").Should(o.Equal("True"))
logger.Infof("Checking UpdatePostActionComplete")
o.Eventually(mcn.GetUpdatePostActionComplete, "30m", "5s").Should(o.Equal("Unknown"))
logger.Infof("Checking RebootedNode Unknown")
o.Eventually(mcn.GetRebootedNode, "15s", "3s").Should(o.Equal("Unknown"))
logger.Infof("Checking RebootedNode")
o.Eventually(mcn.GetRebootedNode, "5m", "5s").Should(o.Equal("True"))
logger.Infof("Checking Resumed")
o.Eventually(mcn.GetResumed, "15s", "5s").Should(o.Equal("True"))
logger.Infof("Checking UpdateComplete")
o.Eventually(mcn.GetUpdateComplete, "10s", "5s").Should(o.Equal("True"))
logger.Infof("Checking Uncordoned")
o.Eventually(mcn.GetUncordoned, "10s", "2s").Should(o.Equal("True"))
logger.Infof("Checking Updated")
o.Eventually(mcn.GetUpdated, "1m", "5s").Should(o.Equal("True"))
})
// After this PR https://github.com/openshift/machine-config-operator/pull/4158 MCs are checked before being applied to the nodes, and if they cannot be rendered
// then the MCP is directly degraded without modifying any node. Hence, we cannot check the "UpdatePrepared/UpdateCompatible" condition using this test case.
// We deprecate the test case until we find a way to test this condition properly.
g.It("Author:rioliu-DEPRECATED-NonPreRelease-Medium-69216-MachineConfigNode UpdateCompatible is Unknown when MC contains kernel argument in ignition section [Disruptive]", func() {
var (
mcName = "mco-tc-66376-reject-ignition-kernel-arguments"
mcTemplate = "add-ignition-kernel-arguments.yaml"
mcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
expectedMessage = "ignition kargs section contains changes"
expectedMCNStatus = "Unknown"
)
mc := NewMachineConfig(oc.AsAdmin(), mcName, mcp.GetName()).SetMCOTemplate(mcTemplate)
mc.skipWaitForMcp = true
exutil.By("Create invalid MC")
defer func() {
mc.deleteNoWait()
o.Expect(mcp.WaitForNotDegradedStatus()).NotTo(o.HaveOccurred(), "mcp worker is not recovered from degraded state")
}()
mc.create()
exutil.By("Check condition NodeDegraded of worker pool")
o.Eventually(mcp, "2m", "5s").Should(HaveConditionField("NodeDegraded", "status", o.Equal("True")))
o.Expect(mcp).Should(HaveNodeDegradedMessage(o.MatchRegexp(expectedMessage)))
logger.Infof("OK\n")
exutil.By("Get Unreconcilable node")
allUnreconcilableNodes := mcp.GetUnreconcilableNodesOrFail()
o.Expect(allUnreconcilableNodes).NotTo(o.BeEmpty(), "Can not find any unreconcilable node from mcp %s", mcp.GetName())
unreconcilableNode := allUnreconcilableNodes[0]
logger.Infof("Unreconcilable node is %s", unreconcilableNode.GetName())
logger.Infof("OK\n")
exutil.By("Check machineconfignode conditions [UpdatePrepared/UpdateCompatible]")
mcn := NewMachineConfigNode(oc.AsAdmin(), unreconcilableNode.GetName())
o.Expect(mcn.GetUpdatePrepared()).Should(o.Equal(expectedMCNStatus))
o.Expect(mcn.GetUpdateCompatible()).Should(o.Equal(expectedMCNStatus))
o.Expect(mcn).Should(HaveConditionField("UpdateCompatible", "message", o.MatchRegexp(expectedMessage)))
logger.Infof("OK\n")
})
g.It("Author:rioliu-NonPreRelease-Medium-69205-[P1] MachineConfigNode corresponding condition status is Unknown when node is degraded [Disruptive]", func() {
var (
wrongUserFileConfig = `{"contents": {"source": "data:text/plain;charset=utf-8;base64,dGVzdA=="},"mode": 420,"path": "/etc/wronguser-test-file.test","user": {"name": "wronguser"}}`
mcName = "mco-tc-69205-wrong-file-user"
mcp = GetCompactCompatiblePool(oc.AsAdmin())
expectedDegradedReasonAnnotation = `failed to retrieve file ownership for file "/etc/wronguser-test-file.test": failed to retrieve UserID for username: wronguser`
expectedMCState = "Degraded"
expectedMCNStatus = "Unknown"
degradedNode Node
)
exutil.By("Create invalid MC")
mc := NewMachineConfig(oc.AsAdmin(), mcName, mcp.GetName())
mc.parameters = []string{fmt.Sprintf("FILES=[%s]", wrongUserFileConfig)}
mc.skipWaitForMcp = true
defer func() {
logger.Infof("\nStart to recover degraded mcp")
mc.deleteNoWait()
mcp.RecoverFromDegraded()
}()
mc.create()
exutil.By("Check mcp worker is degraded")
o.Eventually(mcp.getDegradedMachineCount, "5m", "5s").Should(o.BeNumerically("==", 1), "Degraded machine count is not ==1")
logger.Infof("OK\n")
exutil.By("Get degraded node")
allNodes := mcp.GetNodesOrFail()
for _, node := range allNodes {
if node.GetMachineConfigState() == expectedMCState {
degradedNode = node
break
}
}
o.Expect(degradedNode).NotTo(o.BeNil(), "Degraded node not found")
o.Expect(degradedNode.GetMachineConfigReason()).Should(o.ContainSubstring(expectedDegradedReasonAnnotation), "value of annotation machine config reason is not expected")
logger.Infof("Found degraded node %s", degradedNode.GetName())
logger.Infof("OK\n")
exutil.By("Check corresponding MCN")
mcn := NewMachineConfigNode(oc.AsAdmin(), degradedNode.GetName())
o.Expect(mcn.Exists()).Should(o.BeTrue(), "Cannot find MCN %s", degradedNode.GetName())
logger.Infof("OK\n")
exutil.By("Check MCN conditions")
o.Expect(mcn.GetAppliedFilesAndOS()).Should(o.Equal(expectedMCNStatus), "status of MCN condition UPDATEDFILESANDOS is not expected")
o.Expect(mcn.GetUpdateExecuted()).Should(o.Equal(expectedMCNStatus), "status of MCN condition UPDATEEXECUTED is not expected")
logger.Infof("OK\n")
})
g.It("Author:rioliu-NonPreRelease-Longduration-High-69755-[P2][OnCLayer] MachineConfigNode resources should be synced when node is created/deleted [Disruptive]", func() {
var (
provisioningMachine Machine
deletingMachine Machine
mcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
)
skipTestIfWorkersCannotBeScaled(oc.AsAdmin())
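// Scaling the machineset up provisions a new node whose MachineConfigNode resource must be created and synced with the pool;
// scaling it back down deletes the node, and its MachineConfigNode must be removed as well.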
exutil.By("Get one machineset for testing")
msl, err := NewMachineSetList(oc.AsAdmin(), MachineAPINamespace).GetAll()
o.Expect(err).NotTo(o.HaveOccurred(), "Get machinesets failed")
o.Expect(msl).ShouldNot(o.BeEmpty(), "Machineset list is empty")
ms := msl[0]
logger.Infof("Machineset %s will be used for testing", ms.GetName())
logger.Infof("OK\n")
replica, cerr := strconv.Atoi(ms.GetReplicaOfSpec())
o.Expect(cerr).NotTo(o.HaveOccurred(), "Convert string to int error")
defer func() {
if serr := ms.ScaleTo(replica); serr != nil {
logger.Errorf("Rollback replica for machineset %s failed: %v", ms.GetName(), serr)
}
mcp.waitForComplete()
}()
exutil.By("Scale up machineset to provision new node")
replica++
o.Expect(ms.ScaleTo(replica)).NotTo(o.HaveOccurred(), "Machineset %s scale up error", ms.GetName())
exutil.By("Find new machine")
provisioningMachine = ms.GetMachinesByPhaseOrFail(MachinePhaseProvisioning)[0]
o.Expect(provisioningMachine).NotTo(o.BeNil(), "Cannot find provisioning machine")
logger.Infof("New machine %s is provisioning", provisioningMachine.GetName())
o.Eventually(ms.GetIsReady, "20m", "2m").Should(o.BeTrue(), "MachineSet %s is not ready", ms.GetName())
logger.Infof("OK\n")
exutil.By("Check new MCN")
nodeToBeProvisioned := provisioningMachine.GetNodeOrFail()
newMCN := NewMachineConfigNode(oc.AsAdmin(), nodeToBeProvisioned.GetName())
o.Eventually(newMCN.Exists, "2m", "5s").Should(o.BeTrue(), "new MCN does not exist")
o.Eventually(newMCN.GetDesiredMachineConfigOfSpec, "2m", "5s").Should(o.Equal(mcp.getConfigNameOfSpecOrFail()), "desired config of mcn.spec does not match the corresponding value in the worker pool")
o.Eventually(newMCN.GetDesiredMachineConfigOfStatus, "2m", "5s").Should(o.Equal(mcp.getConfigNameOfSpecOrFail()), "desired config of mcn.status does not match the corresponding value in the worker pool")
o.Eventually(newMCN.GetCurrentMachineConfigOfStatus, "2m", "5s").Should(o.Equal(mcp.getConfigNameOfStatusOrFail()), "current config of mcn.status does not match the corresponding value in the worker pool")
logger.Infof("OK\n")
exutil.By("Scale down machineset to remove node")
replica--
o.Expect(ms.ScaleTo(replica)).NotTo(o.HaveOccurred(), "Machineset %s scale down error", ms.GetName())
deletingMachine = ms.GetMachinesByPhaseOrFail(MachinePhaseDeleting)[0]
o.Expect(deletingMachine).ShouldNot(o.BeNil(), "Cannot find deleting machine")
logger.Infof("Machine %s is being deleted", deletingMachine.GetName())
nodeToBeDeleted := deletingMachine.GetNodeOrFail()
o.Eventually(ms.GetIsReady, "20m", "2m").Should(o.BeTrue(), "MachineSet %s is not ready", ms.GetName())
logger.Infof("OK\n")
exutil.By("Check Node and MCN are removed")
o.Eventually(nodeToBeDeleted.Exists, "10m", "1m").Should(o.BeFalse(), "Node %s is not deleted successfully", nodeToBeDeleted.GetName())
deletedMCN := NewMachineConfigNode(oc.AsAdmin(), nodeToBeDeleted.GetName())
o.Eventually(deletedMCN.Exists, "5m", "10s").Should(o.BeFalse(), "MCN %s is not deleted successfully", deletedMCN.GetName())
logger.Infof("OK\n")
})
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-High-74644-[P2][OnCLayer] Scope each MCN object to only be accessible from its associated MCD [Disruptive]", func() {
SkipIfSNO(oc.AsAdmin())
var (
nodes = exutil.OrFail[[]Node](NewNodeList(oc.AsAdmin()).GetAllLinux())
node0 = nodes[0]
node1 = nodes[1]
machineConfigNode0 = nodes[0].GetMachineConfigNode()
machineConfigNode1 = nodes[1].GetMachineConfigNode()
poolNode1 = machineConfigNode1.GetOrFail(`{.spec.pool.name}`)
patchMCN1 = `{"spec":{"pool":{"name":"` + poolNode1 + `"}}}` // Actually we are not patching anything since we are setting the current value
)
exutil.By("Check that a machineconfignode cannot be patched from another MCD")
cmd := []string{"./rootfs/usr/bin/oc", "patch", "machineconfignodes/" + machineConfigNode1.GetName(), "--type=merge", "-p", patchMCN1}
_, err := exutil.RemoteShContainer(oc.AsAdmin(), MachineConfigNamespace, node0.GetMachineConfigDaemon(), MachineConfigDaemon, cmd...)
o.Expect(err).To(o.HaveOccurred(), "It should not be allowed to patch a machineconfignode from a different machineconfigdaemon")
o.Expect(err).To(o.BeAssignableToTypeOf(&exutil.ExitError{}), "Unexpected error while patching the machineconfignode resource from another MCD")
o.Expect(err.(*exutil.ExitError).StdErr).Should(o.ContainSubstring(`updates to MCN %s can only be done from the MCN's owner node`, machineConfigNode1.GetName()),
"Unexpected error message when patching the machineconfignode from another MCD")
logger.Infof("OK!\n")
exutil.By("Check that a machineconfignode cannot be patched by MCD's SA")
err = machineConfigNode0.Patch("merge", patchMCN1, "--as=system:serviceaccount:openshift-machine-config-operator:machine-config-daemon")
o.Expect(err).To(o.HaveOccurred(), "MCD's SA should not be allowed to patch MachineConfigNode resources")
o.Expect(err).To(o.BeAssignableToTypeOf(&exutil.ExitError{}), "Unexpected error while patching the machineconfignode resource")
o.Expect(err.(*exutil.ExitError).StdErr).Should(o.ContainSubstring(`this user must have a "authentication.kubernetes.io/node-name" claim`),
"Unexpected error message when patching the machineconfignode resource with the MCD's SA")
logger.Infof("OK!\n")
exutil.By("Check able to patch the MCN by same MCD running on node")
_, err = exutil.RemoteShContainer(oc.AsAdmin(), MachineConfigNamespace, node1.GetMachineConfigDaemon(), MachineConfigDaemon, cmd...)
o.Expect(err).NotTo(o.HaveOccurred(),
"A MCD should be allowed to patch its own machineconfignode")
logger.Infof("OK!\n")
exutil.By("Check patch directly from oc using your admin SA")
o.Expect(
machineConfigNode0.Patch("merge", patchMCN1),
).To(o.Succeed(), "Admin SA should be allowed to patch machineconfignodes")
logger.Infof("OK!\n")
})
})
|
package mco
| ||||
test case
|
openshift/openshift-tests-private
|
1d38ba9a-9ddf-490c-9eba-9561663f2b7b
|
Author:rioliu-NonPreRelease-Critical-69184-[P1][OnCLayer] Enable feature gate MachineConfigNodes [Serial]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_machineconfignode.go
|
g.It("Author:rioliu-NonPreRelease-Critical-69184-[P1][OnCLayer] Enable feature gate MachineConfigNodes [Serial]", func() {
// need to check whether featureGate MachineConfigNodes is in enabled list
exutil.By("Check whether featureGate: MachineConfigNodes is enabled")
featureGate := NewResource(oc.AsAdmin(), "featuregate", "cluster")
enabled := featureGate.GetOrFail(`{.status.featureGates[*].enabled}`)
logger.Infof("enabled featuregates: %s", enabled)
o.Expect(enabled).Should(o.ContainSubstring("MachineConfigNodes"), "featureGate: MachineConfigNodes cannot be found")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
9f1f285e-6829-42c5-8e21-4432a88db198
|
Author:rioliu-NonPreRelease-High-69187-[P2][OnCLayer] validate MachineConfigNodes properties [Serial]
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_machineconfignode.go
|
g.It("Author:rioliu-NonPreRelease-High-69187-[P2][OnCLayer] validate MachineConfigNodes properties [Serial]", func() {
nodes := NewNodeList(oc.AsAdmin()).GetAllLinuxWorkerNodesOrFail()
for _, node := range nodes {
mcn := NewMachineConfigNode(oc.AsAdmin(), node.GetName())
exutil.By(fmt.Sprintf("Check MachineConfigNode properties for %s", node.GetName()))
logger.Infof("Check spec.configVersion.desired")
desiredOfNode := node.GetDesiredMachineConfig()
desiredOfMCNSpec := mcn.GetDesiredMachineConfigOfSpec()
o.Expect(desiredOfNode).Should(o.Equal(desiredOfMCNSpec), "desired config of node is not same as machineconfignode.spec")
logger.Infof("Check spec.pool")
poolOfNode := node.GetPrimaryPoolOrFail().GetName()
poolOfMCNSpec := mcn.GetPool()
o.Expect(poolOfNode).Should(o.Equal(poolOfMCNSpec), "pool of node is not same as machineconfignode.spec")
logger.Infof("Check spec.node")
nodeOfMCNSpec := mcn.GetNode()
o.Expect(node.GetName()).Should(o.Equal(nodeOfMCNSpec), "node name is not same as machineconfignode.spec")
logger.Infof("Check status.configVersion.current")
currentOfNode := node.GetCurrentMachineConfig()
currentOfMCNStatus := mcn.GetCurrentMachineConfigOfStatus()
o.Expect(currentOfNode).Should(o.Equal(currentOfMCNStatus), "current config of node is not same as machineconfignode.status")
logger.Infof("Check status.configVersion.desired")
desiredOfMCNStatus := mcn.GetDesiredMachineConfigOfStatus()
o.Expect(desiredOfNode).Should(o.Equal(desiredOfMCNStatus), "desired config of node is not same as machineconfignode.status")
logger.Infof("OK\n")
}
})
| |||||
test case
|
openshift/openshift-tests-private
|
72c22976-3296-4ce2-a83d-0d46cb42c7ee
|
Author:rioliu-NonPreRelease-Longduration-High-69197-[OnCLayer] validate MachineConfigNode condition status transition [Disruptive]
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_machineconfignode.go
|
g.It("Author:rioliu-NonPreRelease-Longduration-High-69197-[OnCLayer] validate MachineConfigNode condition status transition [Disruptive]", func() {
var (
mcName = "create-test-file"
fileConfig = getURLEncodedFileConfig("/etc/test-file", "hello", "420")
workerMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
)
// create machine config to apply a file change
exutil.By("Create a test file on node")
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker)
mc.SetMCOTemplate(GenericMCTemplate)
mc.SetParams(fmt.Sprintf("FILES=[%s]", fileConfig))
mc.skipWaitForMcp = true
defer mc.delete()
mc.create()
exutil.By("Check conidition status of MachineConfigNode")
// get 1st updating worker nodes
workerNode := workerMcp.GetCordonedNodes()[0]
// if the test fails, we need to wait for the mcp to complete and then roll back the change
defer workerMcp.waitForComplete()
mcn := NewMachineConfigNode(oc.AsAdmin(), workerNode.GetName())
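// The MachineConfigNode conditions are expected to transition through the whole node update lifecycle:
// UpdatePrepared/UpdateCompatible -> Cordoned -> Drained -> AppliedFilesAndOS -> UpdateExecuted -> RebootedNode -> Resumed -> UpdateComplete -> Uncordoned -> Updated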
logger.Infof("Checking Updated False")
o.Eventually(mcn.GetUpdated, "1m", "5s").Should(o.Equal("False"))
logger.Infof("Checking UpdatePrepared")
o.Eventually(mcn.GetUpdatePrepared, "1m", "3s").Should(o.Equal("True"))
logger.Infof("Checking UpdateCompatible")
o.Eventually(mcn.GetUpdateCompatible, "3m", "3s").Should(o.Equal("True"))
logger.Infof("Checking UpdateExecuted Unknown")
o.Eventually(mcn.GetUpdateExecuted, "1m", "3s").Should(o.Equal("Unknown"))
logger.Infof("Checking Cordoned")
o.Eventually(mcn.GetCordoned, "30s", "3s").Should(o.Equal("True"))
logger.Infof("Checking Drained Unknown")
o.Eventually(mcn.GetDrained, "30s", "2s").Should(o.Equal("Unknown"))
logger.Infof("Checking Drained")
o.Eventually(mcn.GetDrained, "5m", "2s").Should(o.Equal("True"))
logger.Infof("Checking AppliedFilesAndOS Unknown")
o.Eventually(mcn.GetAppliedFilesAndOS, "1m", "1s").Should(o.Equal("Unknown"))
logger.Infof("Checking AppliedFilesAndOS")
o.Eventually(mcn.GetAppliedFilesAndOS, "3m", "2s").Should(o.Equal("True"))
logger.Infof("Checking UpdateExecuted")
o.Eventually(mcn.GetUpdateExecuted, "20s", "5s").Should(o.Equal("True"))
logger.Infof("Checking UpdatePostActionComplete")
o.Eventually(mcn.GetUpdatePostActionComplete, "30m", "5s").Should(o.Equal("Unknown"))
logger.Infof("Checking RebootedNode Unknown")
o.Eventually(mcn.GetRebootedNode, "15s", "3s").Should(o.Equal("Unknown"))
logger.Infof("Checking RebootedNode")
o.Eventually(mcn.GetRebootedNode, "5m", "5s").Should(o.Equal("True"))
logger.Infof("Checking Resumed")
o.Eventually(mcn.GetResumed, "15s", "5s").Should(o.Equal("True"))
logger.Infof("Checking UpdateComplete")
o.Eventually(mcn.GetUpdateComplete, "10s", "5s").Should(o.Equal("True"))
logger.Infof("Checking Uncordoned")
o.Eventually(mcn.GetUncordoned, "10s", "2s").Should(o.Equal("True"))
logger.Infof("Checking Updated")
o.Eventually(mcn.GetUpdated, "1m", "5s").Should(o.Equal("True"))
})
| |||||
test case
|
openshift/openshift-tests-private
|
712a41f3-db97-4da4-a76c-77c84c4346e1
|
Author:rioliu-DEPRECATED-NonPreRelease-Medium-69216-MachineConfigNode UpdateCompatible is Unknown when MC contains kernel argument in ignition section [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_machineconfignode.go
|
g.It("Author:rioliu-DEPRECATED-NonPreRelease-Medium-69216-MachineConfigNode UpdateCompatible is Unknown when MC contains kernel argument in ignition section [Disruptive]", func() {
var (
mcName = "mco-tc-66376-reject-ignition-kernel-arguments"
mcTemplate = "add-ignition-kernel-arguments.yaml"
mcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
expectedMessage = "ignition kargs section contains changes"
expectedMCNStatus = "Unknown"
)
mc := NewMachineConfig(oc.AsAdmin(), mcName, mcp.GetName()).SetMCOTemplate(mcTemplate)
mc.skipWaitForMcp = true
exutil.By("Create invalid MC")
defer func() {
mc.deleteNoWait()
o.Expect(mcp.WaitForNotDegradedStatus()).NotTo(o.HaveOccurred(), "mcp worker is not recovered from degraded state")
}()
mc.create()
exutil.By("Check condition NodeDegraded of worker pool")
o.Eventually(mcp, "2m", "5s").Should(HaveConditionField("NodeDegraded", "status", o.Equal("True")))
o.Expect(mcp).Should(HaveNodeDegradedMessage(o.MatchRegexp(expectedMessage)))
logger.Infof("OK\n")
exutil.By("Get Unreconcilable node")
allUnreconcilableNodes := mcp.GetUnreconcilableNodesOrFail()
o.Expect(allUnreconcilableNodes).NotTo(o.BeEmpty(), "Can not find any unreconcilable node from mcp %s", mcp.GetName())
unreconcilableNode := allUnreconcilableNodes[0]
logger.Infof("Unreconcilable node is %s", unreconcilableNode.GetName())
logger.Infof("OK\n")
exutil.By("Check machineconfignode conditions [UpdatePrepared/UpdateCompatible]")
mcn := NewMachineConfigNode(oc.AsAdmin(), unreconcilableNode.GetName())
o.Expect(mcn.GetUpdatePrepared()).Should(o.Equal(expectedMCNStatus))
o.Expect(mcn.GetUpdateCompatible()).Should(o.Equal(expectedMCNStatus))
o.Expect(mcn).Should(HaveConditionField("UpdateCompatible", "message", o.MatchRegexp(expectedMessage)))
logger.Infof("OK\n")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
83d92cbf-d01d-4a61-bb8f-115a4a58f5d0
|
Author:rioliu-NonPreRelease-Medium-69205-[P1] MachineConfigNode corresponding condition status is Unknown when node is degraded [Disruptive]
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_machineconfignode.go
|
g.It("Author:rioliu-NonPreRelease-Medium-69205-[P1] MachineConfigNode corresponding condition status is Unknown when node is degraded [Disruptive]", func() {
var (
wrongUserFileConfig = `{"contents": {"source": "data:text/plain;charset=utf-8;base64,dGVzdA=="},"mode": 420,"path": "/etc/wronguser-test-file.test","user": {"name": "wronguser"}}`
mcName = "mco-tc-69205-wrong-file-user"
mcp = GetCompactCompatiblePool(oc.AsAdmin())
expectedDegradedReasonAnnotation = `failed to retrieve file ownership for file "/etc/wronguser-test-file.test": failed to retrieve UserID for username: wronguser`
expectedMCState = "Degraded"
expectedMCNStatus = "Unknown"
degradedNode Node
)
exutil.By("Create invalid MC")
mc := NewMachineConfig(oc.AsAdmin(), mcName, mcp.GetName())
mc.parameters = []string{fmt.Sprintf("FILES=[%s]", wrongUserFileConfig)}
mc.skipWaitForMcp = true
defer func() {
logger.Infof("\nStart to recover degraded mcp")
mc.deleteNoWait()
mcp.RecoverFromDegraded()
}()
mc.create()
exutil.By("Check mcp worker is degraded")
o.Eventually(mcp.getDegradedMachineCount, "5m", "5s").Should(o.BeNumerically("==", 1), "Degraded machine count is not ==1")
logger.Infof("OK\n")
exutil.By("Get degraded node")
allNodes := mcp.GetNodesOrFail()
for _, node := range allNodes {
if node.GetMachineConfigState() == expectedMCState {
degradedNode = node
break
}
}
o.Expect(degradedNode).NotTo(o.BeNil(), "Degraded node not found")
o.Expect(degradedNode.GetMachineConfigReason()).Should(o.ContainSubstring(expectedDegradedReasonAnnotation), "value of annotation machine config reason is not expected")
logger.Infof("Found degraded node %s", degradedNode.GetName())
logger.Infof("OK\n")
exutil.By("Check corresponding MCN")
mcn := NewMachineConfigNode(oc.AsAdmin(), degradedNode.GetName())
o.Expect(mcn.Exists()).Should(o.BeTrue(), "Cannot find MCN %s", degradedNode.GetName())
logger.Infof("OK\n")
exutil.By("Check MCN conditions")
o.Expect(mcn.GetAppliedFilesAndOS()).Should(o.Equal(expectedMCNStatus), "status of MCN condition UPDATEDFILESANDOS is not expected")
o.Expect(mcn.GetUpdateExecuted()).Should(o.Equal(expectedMCNStatus), "status of MCN condition UPDATEEXECUTED is not expected")
logger.Infof("OK\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
7bb26725-b790-4f30-b16c-8df7f00197e9
|
Author:rioliu-NonPreRelease-Longduration-High-69755-[P2][OnCLayer] MachineConfigNode resources should be synced when node is created/deleted [Disruptive]
|
['"strconv"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_machineconfignode.go
|
g.It("Author:rioliu-NonPreRelease-Longduration-High-69755-[P2][OnCLayer] MachineConfigNode resources should be synced when node is created/deleted [Disruptive]", func() {
var (
provisioningMachine Machine
deletingMachine Machine
mcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
)
skipTestIfWorkersCannotBeScaled(oc.AsAdmin())
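// Scaling the machineset up provisions a new node whose MachineConfigNode resource must be created and synced with the pool;
// scaling it back down deletes the node, and its MachineConfigNode must be removed as well.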
exutil.By("Get one machineset for testing")
msl, err := NewMachineSetList(oc.AsAdmin(), MachineAPINamespace).GetAll()
o.Expect(err).NotTo(o.HaveOccurred(), "Get machinesets failed")
o.Expect(msl).ShouldNot(o.BeEmpty(), "Machineset list is empty")
ms := msl[0]
logger.Infof("Machineset %s will be used for testing", ms.GetName())
logger.Infof("OK\n")
replica, cerr := strconv.Atoi(ms.GetReplicaOfSpec())
o.Expect(cerr).NotTo(o.HaveOccurred(), "Convert string to int error")
defer func() {
if serr := ms.ScaleTo(replica); serr != nil {
logger.Errorf("Rollback replica for machineset %s failed: %v", ms.GetName(), serr)
}
mcp.waitForComplete()
}()
exutil.By("Scale up machineset to provision new node")
replica++
o.Expect(ms.ScaleTo(replica)).NotTo(o.HaveOccurred(), "Machineset %s scale up error", ms.GetName())
exutil.By("Find new machine")
provisioningMachine = ms.GetMachinesByPhaseOrFail(MachinePhaseProvisioning)[0]
o.Expect(provisioningMachine).NotTo(o.BeNil(), "Cannot find provisioning machine")
logger.Infof("New machine %s is provisioning", provisioningMachine.GetName())
o.Eventually(ms.GetIsReady, "20m", "2m").Should(o.BeTrue(), "MachineSet %s is not ready", ms.GetName())
logger.Infof("OK\n")
exutil.By("Check new MCN")
nodeToBeProvisioned := provisioningMachine.GetNodeOrFail()
newMCN := NewMachineConfigNode(oc.AsAdmin(), nodeToBeProvisioned.GetName())
o.Eventually(newMCN.Exists, "2m", "5s").Should(o.BeTrue(), "new MCN does not exist")
o.Eventually(newMCN.GetDesiredMachineConfigOfSpec, "2m", "5s").Should(o.Equal(mcp.getConfigNameOfSpecOrFail()), "desired config of mcn.spec does not match the corresponding value in the worker pool")
o.Eventually(newMCN.GetDesiredMachineConfigOfStatus, "2m", "5s").Should(o.Equal(mcp.getConfigNameOfSpecOrFail()), "desired config of mcn.status does not match the corresponding value in the worker pool")
o.Eventually(newMCN.GetCurrentMachineConfigOfStatus, "2m", "5s").Should(o.Equal(mcp.getConfigNameOfStatusOrFail()), "current config of mcn.status does not match the corresponding value in the worker pool")
logger.Infof("OK\n")
exutil.By("Scale down machineset to remove node")
replica--
o.Expect(ms.ScaleTo(replica)).NotTo(o.HaveOccurred(), "Machineset %s scale down error", ms.GetName())
deletingMachine = ms.GetMachinesByPhaseOrFail(MachinePhaseDeleting)[0]
o.Expect(deletingMachine).ShouldNot(o.BeNil(), "Cannot find deleting machine")
logger.Infof("Machine %s is being deleted", deletingMachine.GetName())
nodeToBeDeleted := deletingMachine.GetNodeOrFail()
o.Eventually(ms.GetIsReady, "20m", "2m").Should(o.BeTrue(), "MachineSet %s is not ready", ms.GetName())
logger.Infof("OK\n")
exutil.By("Check Node and MCN are removed")
o.Eventually(nodeToBeDeleted.Exists, "10m", "1m").Should(o.BeFalse(), "Node %s is not deleted successfully", nodeToBeDeleted.GetName())
deletedMCN := NewMachineConfigNode(oc.AsAdmin(), nodeToBeDeleted.GetName())
o.Eventually(deletedMCN.Exists, "5m", "10s").Should(o.BeFalse(), "MCN %s is not deleted successfully", deletedMCN.GetName())
logger.Infof("OK\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
c989cd67-4e30-4b79-a29d-e333ce587cf3
|
Author:sregidor-NonHyperShiftHOST-NonPreRelease-High-74644-[P2][OnCLayer] Scope each MCN object to only be accessible from its associated MCD [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_machineconfignode.go
|
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-High-74644-[P2][OnCLayer] Scope each MCN object to only be accessible from its associated MCD [Disruptive]", func() {
SkipIfSNO(oc.AsAdmin())
var (
nodes = exutil.OrFail[[]Node](NewNodeList(oc.AsAdmin()).GetAllLinux())
node0 = nodes[0]
node1 = nodes[1]
machineConfigNode0 = nodes[0].GetMachineConfigNode()
machineConfigNode1 = nodes[1].GetMachineConfigNode()
poolNode1 = machineConfigNode1.GetOrFail(`{.spec.pool.name}`)
patchMCN1 = `{"spec":{"pool":{"name":"` + poolNode1 + `"}}}` // Actually we are not patching anything since we are setting the current value
)
exutil.By("Check that a machineconfignode cannot be patched from another MCD")
cmd := []string{"./rootfs/usr/bin/oc", "patch", "machineconfignodes/" + machineConfigNode1.GetName(), "--type=merge", "-p", patchMCN1}
_, err := exutil.RemoteShContainer(oc.AsAdmin(), MachineConfigNamespace, node0.GetMachineConfigDaemon(), MachineConfigDaemon, cmd...)
o.Expect(err).To(o.HaveOccurred(), "It should not be allowed to patch a machineconfignode from a different machineconfigdaemon")
o.Expect(err).To(o.BeAssignableToTypeOf(&exutil.ExitError{}), "Unexpected error while patching the machineconfignode resource from another MCD")
o.Expect(err.(*exutil.ExitError).StdErr).Should(o.ContainSubstring(`updates to MCN %s can only be done from the MCN's owner node`, machineConfigNode1.GetName()),
"Unexpected error message when patching the machineconfignode from another MCD")
logger.Infof("OK!\n")
exutil.By("Check that a machineconfignode cannot be patched by MCD's SA")
err = machineConfigNode0.Patch("merge", patchMCN1, "--as=system:serviceaccount:openshift-machine-config-operator:machine-config-daemon")
o.Expect(err).To(o.HaveOccurred(), "MCD's SA should not be allowed to patch MachineConfigNode resources")
o.Expect(err).To(o.BeAssignableToTypeOf(&exutil.ExitError{}), "Unexpected error while patching the machineconfignode resource")
o.Expect(err.(*exutil.ExitError).StdErr).Should(o.ContainSubstring(`this user must have a "authentication.kubernetes.io/node-name" claim`),
"Unexpected error message when patching the machineconfignode resource with the MCD's SA")
logger.Infof("OK!\n")
exutil.By("Check that the MCN can be patched by the MCD running on its own node")
_, err = exutil.RemoteShContainer(oc.AsAdmin(), MachineConfigNamespace, node1.GetMachineConfigDaemon(), MachineConfigDaemon, cmd...)
o.Expect(err).NotTo(o.HaveOccurred(),
"A MCD should be allowed to patch its own machineconfignode")
logger.Infof("OK!\n")
exutil.By("Check that the MCN can be patched directly from oc using the admin SA")
o.Expect(
machineConfigNode0.Patch("merge", patchMCN1),
).To(o.Succeed(), "Admin SA should be allowed to patch machineconfignodes")
logger.Infof("OK!\n")
})
| ||||||
test
|
openshift/openshift-tests-private
|
3d7a0476-55fa-4297-984c-d8fac12b17aa
|
mco_metrics
|
import (
"fmt"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
)
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_metrics.go
|
package mco
import (
"fmt"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
)
var _ = g.Describe("[sig-mco] MCO metrics", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("mco-metrics", exutil.KubeConfigPath())
// Compact compatible MCP. If the node is compact/SNO this variable will be the master pool, else it will be the worker pool
mcp *MachineConfigPool
)
g.JustBeforeEach(func() {
mcp = GetCompactCompatiblePool(oc.AsAdmin())
preChecks(oc)
})
g.It("Author:sregidor-ConnectedOnly-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-77356-[P1][OnCLayer] Test mcd_local_unsupported_packages metric [Disruptive]", func() {
var (
node = mcp.GetSortedNodesOrFail()[0]
query = `mcd_local_unsupported_packages{node="` + node.GetName() + `"}`
valueJSONPath = `data.result.0.value.1`
expectedUnsupportedPackages = "5"
deferredResetNeeded = true
)
exutil.By("Configure coreos stream repo in a node")
defer RemoveConfiguredStreamCentosRepo(node)
o.Expect(ConfigureStreamCentosRepo(node, "9-stream")).To(o.Succeed(),
"Error configuring the centos repo in %s", node)
logger.Infof("OK!\n")
exutil.By("Check that no unsupported packages are reported")
monitor, err := exutil.NewMonitor(oc.AsAdmin())
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the monitor to query the metrics")
o.Eventually(monitor.SimpleQuery, "10s", "2s").WithArguments(query).Should(HavePathWithValue(valueJSONPath, o.Equal("0")),
"There are reported unsupported packages in %s", node)
logger.Infof("OK!\n")
exutil.By("Install package from repo")
defer func() {
if deferredResetNeeded {
if err := node.OSReset(); err != nil {
logger.Errorf("Error in the OS Reset: %s", err)
}
if err := node.Reboot(); err != nil {
logger.Errorf("Error in the reboot: %s", err)
}
mcp.waitForComplete()
} else {
logger.Infof("The OS Reset has already been executed. No need to execute it again in the deferred section")
}
}()
installedPackage := "man-db"
_, err = node.InstallRpm(installedPackage)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error installing %s package in %s", installedPackage, node)
logger.Infof("OK!\n")
exutil.By("Install package locally")
logger.Infof("Download package")
_, err = node.DebugNodeWithChroot("sh", "-c", useProxyInCommand("curl -kL https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm -o /tmp/epel-release-latest-9.noarch.rpm"))
o.Expect(err).NotTo(o.HaveOccurred(), "Error downloading the epel rpm package")
logger.Infof("Install")
_, err = node.InstallRpm("/tmp/epel-release-latest-9.noarch.rpm")
o.Expect(err).NotTo(o.HaveOccurred(),
"Error installing the epel-release package in %s", node)
logger.Infof("OK!\n")
exutil.By("Remove package from OS")
removedPackage := "git-core"
_, err = node.DebugNodeWithChroot("rpm-ostree", "override", "remove", removedPackage)
o.Expect(err).NotTo(o.HaveOccurred(), "Error removing package %s from %s", removedPackage, node)
logger.Infof("OK!\n")
exutil.By("Replace an existing package in the OS")
if exutil.OrFail[bool](node.CanUseDnfDownload()) {
replacedPackage := "nano"
pkgPath, err := node.DnfDownload(replacedPackage, "/tmp")
o.Expect(err).NotTo(o.HaveOccurred(), "Error downloading %s package", replacedPackage)
_, err = node.DebugNodeWithChroot("rpm-ostree", "override", "replace", "--experimental", pkgPath)
o.Expect(err).NotTo(o.HaveOccurred(), "Error replacing %s package in the OS", replacedPackage)
} else {
expectedUnsupportedPackages = "4"
logger.Infof("It is not possible to use dnf to download the right package. We skip the package replacement. Expected unsupported packages will be %s now", expectedUnsupportedPackages)
}
logger.Infof("OK!\n")
exutil.By("Override package locally")
logger.Infof("Download package")
_, err = node.DebugNodeWithChroot("sh", "-c", useProxyInCommand("curl -kL https://mirrors.rpmfusion.org/free/el/rpmfusion-free-release-9.noarch.rpm -o /tmp/rpmfusion-free-release-9.noarch.rpm"))
o.Expect(err).NotTo(o.HaveOccurred(), "Error downloading the rpmfusion rpm package")
_, err = node.DebugNodeWithChroot("rpm-ostree", "install", "--force-replacefiles", "/tmp/rpmfusion-free-release-9.noarch.rpm")
o.Expect(err).NotTo(o.HaveOccurred(), "Error installing forcing a replace")
logger.Infof("OK!\n")
exutil.By("Check that no unsupported packages are reported before rebooting the node")
o.Eventually(monitor.SimpleQuery, "10s", "2s").WithArguments(query).Should(HavePathWithValue(valueJSONPath, o.Equal("0")),
"There are reported unsupported packages in %s before rebooting the node", node)
logger.Infof("OK!\n")
exutil.By("Reboot the node to apply the changes")
o.Expect(node.Reboot()).To(o.Succeed(),
"Error rebooting %s to apply the changes", node)
mcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Check that the right number of unsupported packages is reported")
o.Eventually(monitor.SimpleQuery, "10s", "2s").WithArguments(query).Should(HavePathWithValue(valueJSONPath, o.Equal(expectedUnsupportedPackages)),
"The metric is not reporting the right number of unsupported packages in %s", node)
logger.Infof("OK!\n")
exutil.By("Restore original OS status in the node")
o.Eventually(node.IsRpmOsTreeIdle, "10m", "20s").
Should(o.BeTrue(), "rpm-ostree status didn't become idle")
o.Expect(node.OSReset()).To(o.Succeed(),
"Error restoring the original OS status in %s", node)
o.Expect(node.Reboot()).To(o.Succeed(),
"Error rebooting %s to restore the initial state", node)
mcp.waitForComplete()
deferredResetNeeded = false
logger.Infof("OK!\n")
exutil.By("Check that no unsupported packages are reported after restoring the original OS status")
o.Eventually(monitor.SimpleQuery, "10s", "2s").WithArguments(query).Should(HavePathWithValue(valueJSONPath, o.Equal("0")),
"There are reported unsupported packages in %s after restoring the original status", node)
logger.Infof("OK!\n")
})
})
func useProxyInCommand(cmd string) string {
return fmt.Sprintf("set -a; source /etc/mco/proxy.env && %s", cmd)
}
|
package mco
| ||||
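For reference, a minimal sketch of how the mcd_local_unsupported_packages metric is queried through the helpers used in this file (exutil.NewMonitor, SimpleQuery and the HavePathWithValue matcher); the wrapper function name, node name and expected count below are illustrative assumptions, not part of the suite.
// Sketch: query the MCD unsupported-packages metric for one node and assert its value.
// Assumes the mco package helpers shown above; nodeName and expectedCount are example inputs.
func checkUnsupportedPackagesMetric(oc *exutil.CLI, nodeName, expectedCount string) {
	query := `mcd_local_unsupported_packages{node="` + nodeName + `"}`
	valueJSONPath := `data.result.0.value.1`
	monitor, err := exutil.NewMonitor(oc.AsAdmin())
	o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the monitor to query the metrics")
	// Poll the query until the metric reports the expected number of unsupported packages.
	o.Eventually(monitor.SimpleQuery, "10s", "2s").WithArguments(query).Should(
		HavePathWithValue(valueJSONPath, o.Equal(expectedCount)),
		"The metric does not report %s unsupported packages for node %s", expectedCount, nodeName)
}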
function
|
openshift/openshift-tests-private
|
345b1e3b-18cb-43a7-99ed-41c10edf3e65
|
useProxyInCommand
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_metrics.go
|
func useProxyInCommand(cmd string) string {
return fmt.Sprintf("set -a; source /etc/mco/proxy.env && %s", cmd)
}
|
mco
| ||||
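As a usage note, this helper is combined with Node.DebugNodeWithChroot (as in the metric test above) so that commands run on the node honour the cluster proxy settings from /etc/mco/proxy.env; a minimal sketch, where the URL and destination path are illustrative values only.
// Sketch: run a download on the node through the cluster proxy environment.
// The URL and target path are example values.
downloadCmd := useProxyInCommand("curl -kL https://example.com/some-package.rpm -o /tmp/some-package.rpm")
_, err := node.DebugNodeWithChroot("sh", "-c", downloadCmd)
o.Expect(err).NotTo(o.HaveOccurred(), "Error downloading the rpm package through the proxy")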
test case
|
openshift/openshift-tests-private
|
23b3c1fa-0301-433d-9a43-a5bb494ea74b
|
Author:sregidor-ConnectedOnly-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-77356-[P1][OnCLayer] Test mcd_local_unsupported_packages metric [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_metrics.go
|
g.It("Author:sregidor-ConnectedOnly-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-77356-[P1][OnCLayer] Test mcd_local_unsupported_packages metric [Disruptive]", func() {
var (
node = mcp.GetSortedNodesOrFail()[0]
query = `mcd_local_unsupported_packages{node="` + node.GetName() + `"}`
valueJSONPath = `data.result.0.value.1`
expectedUnsupportedPackages = "5"
deferredResetNeeded = true
)
exutil.By("Configure coreos stream repo in a node")
defer RemoveConfiguredStreamCentosRepo(node)
o.Expect(ConfigureStreamCentosRepo(node, "9-stream")).To(o.Succeed(),
"Error configuring the centos repo in %s", node)
logger.Infof("OK!\n")
exutil.By("Check that no unsupported packages are reported")
monitor, err := exutil.NewMonitor(oc.AsAdmin())
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the monitor to query the metrics")
o.Eventually(monitor.SimpleQuery, "10s", "2s").WithArguments(query).Should(HavePathWithValue(valueJSONPath, o.Equal("0")),
"There are reported unsupported packages in %s", node)
logger.Infof("OK!\n")
exutil.By("Install package from repo")
defer func() {
if deferredResetNeeded {
if err := node.OSReset(); err != nil {
logger.Errorf("Error in the OS Reset: %s", err)
}
if err := node.Reboot(); err != nil {
logger.Errorf("Error in the reboot: %s", err)
}
mcp.waitForComplete()
} else {
logger.Infof("The OS Reset has already been executed. No need to execute it again in the deferred section")
}
}()
installedPackage := "man-db"
_, err = node.InstallRpm(installedPackage)
o.Expect(err).NotTo(o.HaveOccurred(),
"Error installing %s package in %s", installedPackage, node)
logger.Infof("OK!\n")
exutil.By("Install package locally")
logger.Infof("Download package")
_, err = node.DebugNodeWithChroot("sh", "-c", useProxyInCommand("curl -kL https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm -o /tmp/epel-release-latest-9.noarch.rpm"))
o.Expect(err).NotTo(o.HaveOccurred(), "Error downloading the epel rpm package")
logger.Infof("Install")
_, err = node.InstallRpm("/tmp/epel-release-latest-9.noarch.rpm")
o.Expect(err).NotTo(o.HaveOccurred(),
"Error installing the epel-release package in %s", node)
logger.Infof("OK!\n")
exutil.By("Remove package from OS")
removedPackage := "git-core"
_, err = node.DebugNodeWithChroot("rpm-ostree", "override", "remove", removedPackage)
o.Expect(err).NotTo(o.HaveOccurred(), "Error removing package %s from %s", removedPackage, node)
logger.Infof("OK!\n")
exutil.By("Replace an existing package in the OS")
if exutil.OrFail[bool](node.CanUseDnfDownload()) {
replacedPackage := "nano"
pkgPath, err := node.DnfDownload(replacedPackage, "/tmp")
o.Expect(err).NotTo(o.HaveOccurred(), "Error downloading %s package", replacedPackage)
_, err = node.DebugNodeWithChroot("rpm-ostree", "override", "replace", "--experimental", pkgPath)
o.Expect(err).NotTo(o.HaveOccurred(), "Error replacing %s package in the OS", replacedPackage)
} else {
expectedUnsupportedPackages = "4"
logger.Infof("It is not possible to use dnf to download the right package. We skip the package replacement. Expected unsupported packages will be %s now", expectedUnsupportedPackages)
}
logger.Infof("OK!\n")
exutil.By("Override package locally")
logger.Infof("Download package")
_, err = node.DebugNodeWithChroot("sh", "-c", useProxyInCommand("curl -kL https://mirrors.rpmfusion.org/free/el/rpmfusion-free-release-9.noarch.rpm -o /tmp/rpmfusion-free-release-9.noarch.rpm"))
o.Expect(err).NotTo(o.HaveOccurred(), "Error downloading the rpmfusion rpm package")
_, err = node.DebugNodeWithChroot("rpm-ostree", "install", "--force-replacefiles", "/tmp/rpmfusion-free-release-9.noarch.rpm")
o.Expect(err).NotTo(o.HaveOccurred(), "Error installing forcing a replace")
logger.Infof("OK!\n")
exutil.By("Check that no unsupported packages are reported before rebooting the node")
o.Eventually(monitor.SimpleQuery, "10s", "2s").WithArguments(query).Should(HavePathWithValue(valueJSONPath, o.Equal("0")),
"There are reported unsupported packages in %s before rebooting the node", node)
logger.Infof("OK!\n")
exutil.By("Reboot the node to apply the changes")
o.Expect(node.Reboot()).To(o.Succeed(),
"Error rebooting %s to apply the changes", node)
mcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Check that the right number of unsupported packages is reported")
o.Eventually(monitor.SimpleQuery, "10s", "2s").WithArguments(query).Should(HavePathWithValue(valueJSONPath, o.Equal(expectedUnsupportedPackages)),
"The metric is not reporting the right number of unsupported packages in %s", node)
logger.Infof("OK!\n")
exutil.By("Restore original OS status in the node")
o.Eventually(node.IsRpmOsTreeIdle, "10m", "20s").
Should(o.BeTrue(), "rpm-ostree status didn't become idle")
o.Expect(node.OSReset()).To(o.Succeed(),
"Error restoring the original OS status in %s", node)
o.Expect(node.Reboot()).To(o.Succeed(),
"Error rebooting %s to restore the initial state", node)
mcp.waitForComplete()
deferredResetNeeded = false
logger.Infof("OK!\n")
exutil.By("Check that no unsupported packages are reported after restoring the original OS status")
o.Eventually(monitor.SimpleQuery, "10s", "2s").WithArguments(query).Should(HavePathWithValue(valueJSONPath, o.Equal("0")),
"There are reported unsupported packages in %s after restoring the original status", node)
logger.Infof("OK!\n")
})
| ||||||
test
|
openshift/openshift-tests-private
|
891e7d46-6b13-4c48-b371-e479d06da640
|
mco_nodedisruptionpolicy
|
import (
"fmt"
"os"
"regexp"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_nodedisruptionpolicy.go
|
package mco
import (
"fmt"
"os"
"regexp"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-mco] MCO NodeDisruptionPolicy", func() {
defer g.GinkgoRecover()
const (
LogPrefix = `Performing post config change action: `
LogPerformingPostConfigNone = LogPrefix + "None"
LogPerformingPostConfigReload = LogPrefix + "Reload"
LogPerformingPostConfigRestart = LogPrefix + "Restart"
LogPerformingPostConfigDaemonReload = LogPrefix + "DaemonReload"
LogTemplateForUnitAction = `%s service %s successfully`
)
var (
oc = exutil.NewCLI("mco-nodedisruptionpolicy", exutil.KubeConfigPath())
TestService = "crio.service"
LogServiceReloadedSuccessfully = fmt.Sprintf(LogTemplateForUnitAction, TestService, "reloaded")
LogServiceRestartedSuccessfully = fmt.Sprintf(LogTemplateForUnitAction, TestService, "restarted")
LogDaemonReloadedSuccessfully = fmt.Sprintf(LogTemplateForUnitAction, "daemon-reload", "reloaded")
)
g.JustBeforeEach(func() {
preChecks(oc)
// skip the test if featureSet is not there
// check featureGate NodeDisruptionPolicy is enabled
enabledFeatureGates := NewResource(oc.AsAdmin(), "featuregate", "cluster").GetOrFail(`{.status.featureGates[*].enabled}`)
o.Expect(enabledFeatureGates).Should(o.ContainSubstring("NodeDisruptionPolicy"), "featureGate: NodeDisruptionPolicy is not in enabled list")
})
g.It("Author:rioliu-NonPreRelease-High-73368-[P1] NodeDisruptionPolicy files with action None [Disruptive]", func() {
testFileBasedPolicy(oc, "73368", []Action{NewCommonAction(NodeDisruptionPolicyActionNone)}, []string{LogPerformingPostConfigNone})
})
g.It("Author:rioliu-NonPreRelease-Longduration-High-73374-[P2] NodeDisruptionPolicy files with action Reboot [Disruptive]", func() {
testFileBasedPolicy(oc, "73374", []Action{NewCommonAction(NodeDisruptionPolicyActionReboot)}, []string{})
})
g.It("Author:rioliu-NonPreRelease-High-73375-NodeDisruptionPolicy files with action Restart [Disruptive]", func() {
testFileBasedPolicy(oc, "73375", []Action{NewRestartAction(TestService)}, []string{LogPerformingPostConfigRestart, LogServiceRestartedSuccessfully})
})
g.It("Author:rioliu-NonPreRelease-High-73378-[P1] NodeDisruptionPolicy files with action Reload [Disruptive]", func() {
testFileBasedPolicy(oc, "73378", []Action{NewReloadAction(TestService)}, []string{LogPerformingPostConfigReload, LogServiceReloadedSuccessfully})
})
g.It("Author:rioliu-NonPreRelease-High-73385-[P2] NodeDisruptionPolicy files with action DaemonReload [Disruptive]", func() {
testFileBasedPolicy(oc, "73385", []Action{NewCommonAction(NodeDisruptionPolicyActionDaemonReload)}, []string{LogPerformingPostConfigDaemonReload, LogDaemonReloadedSuccessfully})
})
g.It("Author:rioliu-NonPreRelease-Longduration-High-73388-NodeDisruptionPolicy files with action Drain [Disruptive]", func() {
testFileBasedPolicy(oc, "73388", []Action{NewCommonAction(NodeDisruptionPolicyActionDrain)}, []string{})
})
g.It("Author:rioliu-NonPreRelease-Longduration-High-73389-[P1] NodeDisruptionPolicy files with multiple actions [Disruptive]", func() {
testFileBasedPolicy(oc, "73389", []Action{
NewCommonAction(NodeDisruptionPolicyActionDrain),
NewCommonAction(NodeDisruptionPolicyActionDaemonReload),
NewReloadAction(TestService),
NewRestartAction(TestService),
}, []string{
LogPerformingPostConfigReload,
LogServiceReloadedSuccessfully,
LogPerformingPostConfigRestart,
LogServiceRestartedSuccessfully,
LogPerformingPostConfigDaemonReload,
LogDaemonReloadedSuccessfully,
})
})
g.It("Author:rioliu-NonPreRelease-High-73414-[P1] NodeDisruptionPolicy units with action None [Disruptive]", func() {
testUnitBasedPolicy(oc, "73414", []Action{NewCommonAction(NodeDisruptionPolicyActionNone)}, []string{LogPerformingPostConfigNone})
})
g.It("Author:rioliu-NonPreRelease-Longduration-High-73413-[P2] NodeDisruptionPolicy units with action Reboot [Disruptive]", func() {
testUnitBasedPolicy(oc, "73413", []Action{NewCommonAction(NodeDisruptionPolicyActionReboot)}, []string{})
})
g.It("Author:rioliu-NonPreRelease-Longduration-High-73411-NodeDisruptionPolicy units with multiple actions [Disruptive]", func() {
testUnitBasedPolicy(oc, "73411", []Action{
NewCommonAction(NodeDisruptionPolicyActionDrain),
NewCommonAction(NodeDisruptionPolicyActionDaemonReload),
NewReloadAction(TestService),
NewRestartAction(TestService),
}, []string{
LogPerformingPostConfigReload,
LogServiceReloadedSuccessfully,
LogPerformingPostConfigRestart,
LogServiceRestartedSuccessfully,
LogPerformingPostConfigDaemonReload,
LogDaemonReloadedSuccessfully,
})
})
g.It("Author:rioliu-NonPreRelease-High-73417-[P1] NodeDisruptionPolicy sshkey with action None [Disruptive]", func() {
testSSHKeyBasedPolicy(oc, "73417", []Action{NewCommonAction(NodeDisruptionPolicyActionNone)}, []string{LogPerformingPostConfigNone})
})
g.It("Author:rioliu-NonPreRelease-Longduration-High-73418-[P2] NodeDisruptionPolicy sshkey with action Reboot [Disruptive]", func() {
testSSHKeyBasedPolicy(oc, "73418", []Action{NewCommonAction(NodeDisruptionPolicyActionReboot)}, []string{})
})
g.It("Author:rioliu-NonPreRelease-Longduration-High-73415-NodeDisruptionPolicy sshkey with multiple actions [Disruptive]", func() {
testSSHKeyBasedPolicy(oc, "73415", []Action{
NewCommonAction(NodeDisruptionPolicyActionDrain),
NewCommonAction(NodeDisruptionPolicyActionDaemonReload),
NewReloadAction(TestService),
NewRestartAction(TestService),
}, []string{
LogPerformingPostConfigReload,
LogServiceReloadedSuccessfully,
LogPerformingPostConfigRestart,
LogServiceRestartedSuccessfully,
LogPerformingPostConfigDaemonReload,
LogDaemonReloadedSuccessfully,
})
})
g.It("Author:rioliu-NonPreRelease-High-73489-[P1] NodeDisruptionPolicy MachineConfigurations is only effective with name cluster", func() {
var (
filePath = generateTempFilePath(e2e.TestContext.OutputDir, "invalidmc-*")
fileContent = strings.ReplaceAll(NewNodeDisruptionPolicy(oc).PrettyString(), "cluster", "iminvalid")
)
exutil.By("Create machineconfiguration.operator.openshift.io with invalid name")
o.Expect(os.WriteFile(filePath, []byte(fileContent), 0o644)).NotTo(o.HaveOccurred(), "create invalid MC file failed")
defer os.Remove(filePath)
output, ocerr := oc.AsAdmin().Run("apply").Args("-f", filePath).Output()
exutil.By("Check whether oc command is failed")
o.Expect(ocerr).To(o.HaveOccurred(), "Expected oc command error not found")
o.Expect(output).Should(o.ContainSubstring("Only a single object of MachineConfiguration is allowed and it must be named cluster"))
})
g.It("Author:rioliu-NonPreRelease-Longduration-Medium-75109-[P2] NodeDisruptionPolicy files allow paths to be defined for non-disruptive updates [Disruptive]", func() {
var (
mcp = GetCompactCompatiblePool(oc.AsAdmin())
node = mcp.GetSortedNodesOrFail()[0]
ndp = NewNodeDisruptionPolicy(oc)
innerDirPath = "/etc/test-file-policy-subdir-75109/extradir/"
innerDirFilePath = innerDirPath + "test-file-inner.txt"
innerDirFileConfig = getURLEncodedFileConfig(innerDirFilePath, "test-75109.txt", "420")
innerDirActions = []Action{NewCommonAction(NodeDisruptionPolicyActionNone)}
innerDirExpectedLogs = []string{LogPerformingPostConfigNone}
innerDirMcName = "test-75109-inner-dir-files"
outerDirPath = "/etc/test-file-policy-subdir-75109"
outerDirFilePath = outerDirPath + "/test-file-outer.txt"
outerDirFileConfig = getURLEncodedFileConfig(outerDirFilePath, "test-75109.txt", "420")
outerDirActions = []Action{NewRestartAction(TestService)}
outerDirExpectedLogs = []string{LogPerformingPostConfigRestart, LogServiceRestartedSuccessfully}
outerDirMcName = "test-75109-outer-dir-files"
filePath = "/etc/test-file-policy-subdir-75109/test-file.txt"
fileConfig = getURLEncodedFileConfig(filePath, "test-75109.txt", "420")
fileActions = []Action{NewReloadAction(TestService)}
fileExpectedLogs = []string{LogPerformingPostConfigReload, LogServiceReloadedSuccessfully}
fileMcName = "test-75109-files"
startTime = node.GetDateOrFail()
mcc = NewController(oc.AsAdmin()).IgnoreLogsBeforeNowOrFail()
)
exutil.By("Patch MachineConfiguration cluster")
defer ndp.Rollback()
o.Expect(
ndp.AddFilePolicy(innerDirPath, innerDirActions...).AddFilePolicy(outerDirPath, outerDirActions...).AddFilePolicy(filePath, fileActions...).Apply(),
).To(o.Succeed(), "Patch MachineConfiguration failed")
logger.Infof("OK!\n")
// Test the behaviour of files created inside the inner directory
exutil.By("Create a test file in the inner directory")
innerMc := NewMachineConfig(oc.AsAdmin(), innerDirMcName, mcp.GetName())
innerMc.SetParams(fmt.Sprintf("FILES=[%s]", innerDirFileConfig))
defer innerMc.delete()
innerMc.create()
logger.Infof("OK!\n")
exutil.By("Check that files inside the inner directory execute the right actions")
checkDrainAndReboot(node, startTime, mcc, innerDirActions)
checkMachineConfigDaemonLog(node, innerDirExpectedLogs)
logger.Infof("OK!\n")
// Test the behaviour of files created inside the outer directory
exutil.By("Create a test file in the outer directory")
startTime = node.GetDateOrFail()
mcc.IgnoreLogsBeforeNowOrFail()
outerMc := NewMachineConfig(oc.AsAdmin(), outerDirMcName, mcp.GetName())
outerMc.SetParams(fmt.Sprintf("FILES=[%s]", outerDirFileConfig))
defer outerMc.Delete()
outerMc.create()
logger.Infof("OK!\n")
exutil.By("Check that files inside the outer directory execute the right actions")
checkDrainAndReboot(node, startTime, mcc, outerDirActions)
checkMachineConfigDaemonLog(node, outerDirExpectedLogs)
logger.Infof("OK!\n")
// Test the behaviour of files created inside the outer directory but with an explicit policy for them
exutil.By("Create a test file inside the outer directory but with an explicitly defined policy")
startTime = node.GetDateOrFail()
mcc.IgnoreLogsBeforeNowOrFail()
fileMc := NewMachineConfig(oc.AsAdmin(), fileMcName, mcp.GetName())
fileMc.SetParams(fmt.Sprintf("FILES=[%s]", fileConfig))
defer fileMc.Delete()
fileMc.create()
logger.Infof("OK!\n")
exutil.By("Check that files with explicit defined policies execute the right actions")
checkDrainAndReboot(node, startTime, mcc, fileActions)
checkMachineConfigDaemonLog(node, fileExpectedLogs)
logger.Infof("OK!\n")
})
g.It("Author:sregidor-NonPreRelease-Longduration-Medium-75110-Propagate a NodeDisruptionPolicy failure condition via degrading the daemon [Disruptive]", func() {
var (
invalidService = "fake.service"
invalidActions = []Action{NewReloadAction(invalidService)}
validActions = []Action{NewReloadAction(TestService)}
mcp = GetCompactCompatiblePool(oc.AsAdmin())
mcName = "mco-tc-75110-failed-node-disruption-policy-action"
filePath = "/etc/test-file-policy-tc-75110-failed-action"
fileContent = "test"
fileConfig = getURLEncodedFileConfig(filePath, fileContent, "420")
expectedNDMessage = regexp.QuoteMeta(fmt.Sprintf("error running systemctl reload %s: Failed to reload %s: Unit %s not found", invalidService, invalidService, invalidService)) // QuoteMeta to escape regex characters
expectedNDReason = "1 nodes are reporting degraded status on sync"
)
exutil.By("Configure an invalid action")
ndp := NewNodeDisruptionPolicy(oc)
defer ndp.Rollback()
defer mcp.RecoverFromDegraded()
o.Expect(ndp.AddFilePolicy(filePath, invalidActions...).Apply()).To(o.Succeed(), "Patch MachineConfiguration failed")
logger.Infof("OK!\n")
exutil.By("Create a MC using the configured disruption policy")
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker)
mc.SetParams(fmt.Sprintf("FILES=[%s]", fileConfig))
mc.skipWaitForMcp = true
defer mc.deleteNoWait()
mc.create()
logger.Infof("OK!\n")
checkDegraded(mcp, expectedNDMessage, expectedNDReason, "NodeDegraded", false, 1)
exutil.By("Fix the disruption policy configuration")
o.Expect(ndp.AddFilePolicy(filePath, validActions...).Apply()).To(o.Succeed(), "Patch MachineConfiguration failed")
logger.Infof("OK!\n")
exutil.By("Check that the configuration can be applied")
o.Eventually(mcp, "10m", "20s").ShouldNot(BeDegraded(),
"The node disruption policy was fixed but the MCP didn't stop being degraded")
mcp.waitForComplete()
o.Eventually(NewResource(oc.AsAdmin(), "co", "machine-config"), "2m", "20s").ShouldNot(BeDegraded(),
"machine-config CO should not be degraded anymore once the configuration is applied")
o.Eventually(NewRemoteFile(mcp.GetSortedNodesOrFail()[0], filePath)).Should(HaveContent(fileContent),
"The configuration was applied but the deployed file doesn't have the right content")
logger.Infof("OK!\n")
})
})
// test func for file based policy test cases
func testFileBasedPolicy(oc *exutil.CLI, caseID string, actions []Action, expectedLogs []string) {
var (
mcName = fmt.Sprintf("create-test-file-%s-%s", caseID, exutil.GetRandomString())
filePath = fmt.Sprintf("/etc/test-file-policy-%s-%s", caseID, exutil.GetRandomString())
fileConfig = getURLEncodedFileConfig(filePath, fmt.Sprintf("test-%s", caseID), "420")
workerNode = NewNodeList(oc.AsAdmin()).GetAllLinuxWorkerNodesOrFail()[0]
workerMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
startTime = workerNode.GetDateOrFail()
mcc = NewController(oc.AsAdmin()).IgnoreLogsBeforeNowOrFail()
)
exutil.By("Patch MachineConfiguration cluster")
ndp := NewNodeDisruptionPolicy(oc)
defer ndp.Rollback()
o.Expect(ndp.AddFilePolicy(filePath, actions...).Apply()).To(o.Succeed(), "Patch MachineConfiguration failed")
exutil.By("Check the nodeDisruptionPolicyStatus, new change should be merged")
o.Expect(ndp.IsUpdated()).To(o.BeTrue(), "New policies are not merged properly")
exutil.By("Create a test file on worker node")
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker)
mc.SetParams(fmt.Sprintf("FILES=[%s]", fileConfig))
mc.skipWaitForMcp = true
defer mc.delete()
mc.create()
// check MCN for reboot and drain
if exutil.OrFail[bool](IsFeaturegateEnabled(oc.AsAdmin(), MachineConfigNodesFeature)) {
checkMachineConfigNode(oc, workerNode.GetName(), actions)
}
workerMcp.waitForComplete()
// check reboot and drain
checkDrainAndReboot(workerNode, startTime, mcc, actions)
// check MCD logs if expectedLogs is not empty
checkMachineConfigDaemonLog(workerNode, expectedLogs)
}
// test func for unit based policy test cases
func testUnitBasedPolicy(oc *exutil.CLI, caseID string, actions []Action, expectedLogs []string) {
var (
unitName = fmt.Sprintf("test-ndp-%s.service", exutil.GetRandomString())
unitContent = "[Unit]\nDescription=test service for disruption policy"
unitEnabled = false
unitConfig = getSingleUnitConfig(unitName, unitEnabled, unitContent)
mcName = fmt.Sprintf("create-test-unit-%s-%s", caseID, exutil.GetRandomString())
workerNode = NewNodeList(oc.AsAdmin()).GetAllLinuxWorkerNodesOrFail()[0]
workerMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
startTime = workerNode.GetDateOrFail()
mcc = NewController(oc.AsAdmin()).IgnoreLogsBeforeNowOrFail()
)
exutil.By("Patch MachineConfiguration cluster")
ndp := NewNodeDisruptionPolicy(oc)
defer ndp.Rollback()
o.Expect(ndp.AddUnitPolicy(unitName, actions...).Apply()).To(o.Succeed(), "Patch MachineConfiguration failed")
exutil.By("Check the nodeDisruptionPolicyStatus, new change should be merged")
o.Expect(ndp.IsUpdated()).To(o.BeTrue(), "New policies are not merged properly")
exutil.By("Create a test unit on worker node")
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker)
mc.SetParams(fmt.Sprintf("UNITS=[%s]", unitConfig))
mc.skipWaitForMcp = true
defer mc.delete()
mc.create()
// check MCN for reboot and drain
if exutil.OrFail[bool](IsFeaturegateEnabled(oc.AsAdmin(), MachineConfigNodesFeature)) {
checkMachineConfigNode(oc, workerNode.GetName(), actions)
}
workerMcp.waitForComplete()
// check reboot and drain
checkDrainAndReboot(workerNode, startTime, mcc, actions)
// check MCD logs if expectedLogs is not empty
checkMachineConfigDaemonLog(workerNode, expectedLogs)
}
// test func for sshkey based policy test cases
func testSSHKeyBasedPolicy(oc *exutil.CLI, caseID string, actions []Action, expectedLogs []string) {
var (
mcName = fmt.Sprintf("create-test-sshkey-%s-%s", caseID, exutil.GetRandomString())
// sshkey change only works on coreOS node
workerNode = NewNodeList(oc.AsAdmin()).GetAllCoreOsWokerNodesOrFail()[0]
workerMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
startTime = workerNode.GetDateOrFail()
mcc = NewController(oc.AsAdmin()).IgnoreLogsBeforeNowOrFail()
)
exutil.By("Patch MachineConfiguration cluster")
ndp := NewNodeDisruptionPolicy(oc)
defer ndp.Rollback()
o.Expect(ndp.SetSSHKeyPolicy(actions...).Apply()).To(o.Succeed(), "Patch MachineConfiguration failed")
exutil.By("Check the nodeDisruptionPolicyStatus, new change should be merged")
o.Expect(ndp.IsUpdated()).To(o.BeTrue(), "New policies are not merged properly")
exutil.By("Create machine config with new SSH authorized key")
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker).SetMCOTemplate(TmplAddSSHAuthorizedKeyForWorker)
mc.skipWaitForMcp = true
defer mc.delete()
mc.create()
// check MCN for reboot and drain
if exutil.OrFail[bool](IsFeaturegateEnabled(oc.AsAdmin(), MachineConfigNodesFeature)) {
checkMachineConfigNode(oc, workerNode.GetName(), actions)
}
workerMcp.waitForComplete()
// check reboot and drain
checkDrainAndReboot(workerNode, startTime, mcc, actions)
// check MCD logs if expectedLogs is not empty
checkMachineConfigDaemonLog(workerNode, expectedLogs)
}
// test func used to check expected logs in MCD log
func checkMachineConfigDaemonLog(node Node, expectedLogs []string) {
if len(expectedLogs) > 0 {
exutil.By("Check MCD log for post config actions")
logs, err := node.GetMCDaemonLogs("update.go")
o.Expect(err).NotTo(o.HaveOccurred(), "Get MCD log failed")
for _, log := range expectedLogs {
o.Expect(logs).Should(o.ContainSubstring(log), "Cannot find expected log for post config actions")
}
}
}
// test func to check MCN by input actions
func checkMachineConfigNode(oc *exutil.CLI, nodeName string, actions []Action) {
hasRebootAction := hasAction(NodeDisruptionPolicyActionReboot, actions)
hasDrainAction := hasAction(NodeDisruptionPolicyActionDrain, actions)
mcn := NewMachineConfigNode(oc.AsAdmin(), nodeName)
if hasDrainAction {
exutil.By("Check whether the node is drained")
o.Eventually(mcn.GetDrained, "5m", "2s").Should(o.Equal("True"))
}
if hasRebootAction {
exutil.By("Check whether the node is rebooted")
o.Eventually(mcn.GetRebootedNode, "10m", "6s").Should(o.Equal("True"))
}
}
// test func to check drain and reboot actions without using MCN
func checkDrainAndReboot(node Node, startTime time.Time, controller *Controller, actions []Action) {
hasRebootAction := hasAction(NodeDisruptionPolicyActionReboot, actions)
hasDrainAction := hasAction(NodeDisruptionPolicyActionDrain, actions)
// A drain operation is always executed when a reboot operation is executed, even if the drain action is not configured
// In SNO clusters the drain operation is not executed if the node is rebooted
checkDrainAction(hasDrainAction || (hasRebootAction && !IsSNO(node.GetOC())), node, controller)
checkRebootAction(hasRebootAction, node, startTime)
}
func hasAction(actnType string, actions []Action) bool {
found := false
for _, a := range actions {
if a.Type == actnType {
found = true
break
}
}
return found
}
|
package mco
| ||||
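To summarise the helper API exercised in this file, a minimal sketch of composing a NodeDisruptionPolicy with several post-config actions, applying it and rolling it back; the file path is an illustrative value, while TestService and the constructors are the ones defined above.
// Sketch: configure a file-based node disruption policy with multiple actions.
// The path is an example only; the helpers are the ones used throughout this file.
ndp := NewNodeDisruptionPolicy(oc)
defer ndp.Rollback()
actions := []Action{
	NewCommonAction(NodeDisruptionPolicyActionDaemonReload),
	NewReloadAction(TestService),
}
o.Expect(ndp.AddFilePolicy("/etc/example-policy-file", actions...).Apply()).To(o.Succeed(), "Patch MachineConfiguration failed")
// The merged policy should show up in nodeDisruptionPolicyStatus.
o.Expect(ndp.IsUpdated()).To(o.BeTrue(), "New policies are not merged properly")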
function
|
openshift/openshift-tests-private
|
034c0a59-c9ae-402e-8ce5-e1861e4a1043
|
testFileBasedPolicy
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_nodedisruptionpolicy.go
|
func testFileBasedPolicy(oc *exutil.CLI, caseID string, actions []Action, expectedLogs []string) {
var (
mcName = fmt.Sprintf("create-test-file-%s-%s", caseID, exutil.GetRandomString())
filePath = fmt.Sprintf("/etc/test-file-policy-%s-%s", caseID, exutil.GetRandomString())
fileConfig = getURLEncodedFileConfig(filePath, fmt.Sprintf("test-%s", caseID), "420")
workerNode = NewNodeList(oc.AsAdmin()).GetAllLinuxWorkerNodesOrFail()[0]
workerMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
startTime = workerNode.GetDateOrFail()
mcc = NewController(oc.AsAdmin()).IgnoreLogsBeforeNowOrFail()
)
exutil.By("Patch MachineConfiguration cluster")
ndp := NewNodeDisruptionPolicy(oc)
defer ndp.Rollback()
o.Expect(ndp.AddFilePolicy(filePath, actions...).Apply()).To(o.Succeed(), "Patch MachineConfiguration failed")
exutil.By("Check the nodeDisruptionPolicyStatus, new change should be merged")
o.Expect(ndp.IsUpdated()).To(o.BeTrue(), "New policies are not merged properly")
exutil.By("Create a test file on worker node")
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker)
mc.SetParams(fmt.Sprintf("FILES=[%s]", fileConfig))
mc.skipWaitForMcp = true
defer mc.delete()
mc.create()
// check MCN for reboot and drain
if exutil.OrFail[bool](IsFeaturegateEnabled(oc.AsAdmin(), MachineConfigNodesFeature)) {
checkMachineConfigNode(oc, workerNode.GetName(), actions)
}
workerMcp.waitForComplete()
// check reboot and drain
checkDrainAndReboot(workerNode, startTime, mcc, actions)
// check MCD logs if expectedLogs is not empty
checkMachineConfigDaemonLog(workerNode, expectedLogs)
}
|
mco
| ||||
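A hypothetical spec showing how this helper is driven; the case ID is a placeholder, and the real specs for each action type appear in the test case entries later in this file.
// Sketch: exercise the file-based policy flow with Drain plus Restart actions.
g.It("Example: NodeDisruptionPolicy files with Drain and Restart actions [Disruptive]", func() {
	testFileBasedPolicy(oc, "00000", []Action{
		NewCommonAction(NodeDisruptionPolicyActionDrain),
		NewRestartAction(TestService),
	}, []string{LogPerformingPostConfigRestart, LogServiceRestartedSuccessfully})
})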
function
|
openshift/openshift-tests-private
|
4ed4c2aa-549d-47e2-9b53-cec74df21f8a
|
testUnitBasedPolicy
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_nodedisruptionpolicy.go
|
func testUnitBasedPolicy(oc *exutil.CLI, caseID string, actions []Action, expectedLogs []string) {
var (
unitName = fmt.Sprintf("test-ndp-%s.service", exutil.GetRandomString())
unitContent = "[Unit]\nDescription=test service for disruption policy"
unitEnabled = false
unitConfig = getSingleUnitConfig(unitName, unitEnabled, unitContent)
mcName = fmt.Sprintf("create-test-unit-%s-%s", caseID, exutil.GetRandomString())
workerNode = NewNodeList(oc.AsAdmin()).GetAllLinuxWorkerNodesOrFail()[0]
workerMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
startTime = workerNode.GetDateOrFail()
mcc = NewController(oc.AsAdmin()).IgnoreLogsBeforeNowOrFail()
)
exutil.By("Patch MachineConfiguration cluster")
ndp := NewNodeDisruptionPolicy(oc)
defer ndp.Rollback()
o.Expect(ndp.AddUnitPolicy(unitName, actions...).Apply()).To(o.Succeed(), "Patch MachineConfiguration failed")
exutil.By("Check the nodeDisruptionPolicyStatus, new change should be merged")
o.Expect(ndp.IsUpdated()).To(o.BeTrue(), "New policies are not merged properly")
exutil.By("Create a test unit on worker node")
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker)
mc.SetParams(fmt.Sprintf("UNITS=[%s]", unitConfig))
mc.skipWaitForMcp = true
defer mc.delete()
mc.create()
// check MCN for reboot and drain
if exutil.OrFail[bool](IsFeaturegateEnabled(oc.AsAdmin(), MachineConfigNodesFeature)) {
checkMachineConfigNode(oc, workerNode.GetName(), actions)
}
workerMcp.waitForComplete()
// check reboot and drain
checkDrainAndReboot(workerNode, startTime, mcc, actions)
// check MCD logs if expectedLogs is not empty
checkMachineConfigDaemonLog(workerNode, expectedLogs)
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
5db0b4d4-b34e-497c-9328-559f1d1fb5e2
|
testSSHKeyBasedPolicy
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_nodedisruptionpolicy.go
|
func testSSHKeyBasedPolicy(oc *exutil.CLI, caseID string, actions []Action, expectedLogs []string) {
var (
mcName = fmt.Sprintf("create-test-sshkey-%s-%s", caseID, exutil.GetRandomString())
// sshkey change only works on coreOS node
workerNode = NewNodeList(oc.AsAdmin()).GetAllCoreOsWokerNodesOrFail()[0]
workerMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
startTime = workerNode.GetDateOrFail()
mcc = NewController(oc.AsAdmin()).IgnoreLogsBeforeNowOrFail()
)
exutil.By("Patch MachineConfiguration cluster")
ndp := NewNodeDisruptionPolicy(oc)
defer ndp.Rollback()
o.Expect(ndp.SetSSHKeyPolicy(actions...).Apply()).To(o.Succeed(), "Patch MachineConfiguration failed")
exutil.By("Check the nodeDisruptionPolicyStatus, new change should be merged")
o.Expect(ndp.IsUpdated()).To(o.BeTrue(), "New policies are not merged properly")
exutil.By("Create machine config with new SSH authorized key")
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker).SetMCOTemplate(TmplAddSSHAuthorizedKeyForWorker)
mc.skipWaitForMcp = true
defer mc.delete()
mc.create()
// check MCN for reboot and drain
if exutil.OrFail[bool](IsFeaturegateEnabled(oc.AsAdmin(), MachineConfigNodesFeature)) {
checkMachineConfigNode(oc, workerNode.GetName(), actions)
}
workerMcp.waitForComplete()
// check reboot and drain
checkDrainAndReboot(workerNode, startTime, mcc, actions)
// check MCD logs if expectedLogs is not empty
checkMachineConfigDaemonLog(workerNode, expectedLogs)
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
97ce11c6-a071-435c-93a1-b6b72fbd1bd9
|
checkMachineConfigDaemonLog
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_nodedisruptionpolicy.go
|
func checkMachineConfigDaemonLog(node Node, expectedLogs []string) {
if len(expectedLogs) > 0 {
exutil.By("Check MCD log for post config actions")
logs, err := node.GetMCDaemonLogs("update.go")
o.Expect(err).NotTo(o.HaveOccurred(), "Get MCD log failed")
for _, log := range expectedLogs {
o.Expect(logs).Should(o.ContainSubstring(log), "Cannot find expected log for post config actions")
}
}
}
|
mco
| |||||
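A short usage sketch: the expected lines are the log constants defined at the top of this file, and passing an empty slice skips the check entirely.
// Sketch: verify that the MCD reloaded the service after the config change.
expectedLogs := []string{LogPerformingPostConfigReload, LogServiceReloadedSuccessfully}
checkMachineConfigDaemonLog(workerNode, expectedLogs)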
function
|
openshift/openshift-tests-private
|
38f20f15-c71e-4ecb-a948-e08a98bac014
|
checkMachineConfigNode
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_nodedisruptionpolicy.go
|
func checkMachineConfigNode(oc *exutil.CLI, nodeName string, actions []Action) {
hasRebootAction := hasAction(NodeDisruptionPolicyActionReboot, actions)
hasDrainAction := hasAction(NodeDisruptionPolicyActionDrain, actions)
mcn := NewMachineConfigNode(oc.AsAdmin(), nodeName)
if hasDrainAction {
exutil.By("Check whether the node is drained")
o.Eventually(mcn.GetDrained, "5m", "2s").Should(o.Equal("True"))
}
if hasRebootAction {
exutil.By("Check whether the node is rebooted")
o.Eventually(mcn.GetRebootedNode, "10m", "6s").Should(o.Equal("True"))
}
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
77581cf4-773d-40ee-8c06-29cb49912bf1
|
checkDrainAndReboot
|
['"time"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_nodedisruptionpolicy.go
|
func checkDrainAndReboot(node Node, startTime time.Time, controller *Controller, actions []Action) {
hasRebootAction := hasAction(NodeDisruptionPolicyActionReboot, actions)
hasDrainAction := hasAction(NodeDisruptionPolicyActionDrain, actions)
// A drain operation is always executed when a reboot operation is executed, even if the drain action is not configured
// In SNO clusters the drain operation is not executed if the node is rebooted
checkDrainAction(hasDrainAction || (hasRebootAction && !IsSNO(node.GetOC())), node, controller)
checkRebootAction(hasRebootAction, node, startTime)
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
e7d3f599-17ca-4635-ad34-15e274c337cd
|
hasAction
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_nodedisruptionpolicy.go
|
func hasAction(actnType string, actions []Action) bool {
found := false
for _, a := range actions {
if a.Type == actnType {
found = true
break
}
}
return found
}
|
mco
| |||||
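A small usage sketch of this predicate, with an illustrative action list, mirroring how the drain and reboot checks above decide what to expect.
// Sketch: inspect a policy's action list to decide the expected post-config behaviour.
actions := []Action{NewCommonAction(NodeDisruptionPolicyActionDrain), NewReloadAction(TestService)}
if hasAction(NodeDisruptionPolicyActionReboot, actions) {
	logger.Infof("A reboot is expected for this policy")
} else if hasAction(NodeDisruptionPolicyActionDrain, actions) {
	logger.Infof("A drain, but no reboot, is expected for this policy")
}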
test case
|
openshift/openshift-tests-private
|
0822ca18-a7fd-4524-bbeb-d025933b2735
|
Author:rioliu-NonPreRelease-High-73368-[P1] NodeDisruptionPolicy files with action None [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_nodedisruptionpolicy.go
|
g.It("Author:rioliu-NonPreRelease-High-73368-[P1] NodeDisruptionPolicy files with action None [Disruptive]", func() {
testFileBasedPolicy(oc, "73368", []Action{NewCommonAction(NodeDisruptionPolicyActionNone)}, []string{LogPerformingPostConfigNone})
})
| ||||||
test case
|
openshift/openshift-tests-private
|
999291b4-2cec-4a23-826a-6e70c4fbb796
|
Author:rioliu-NonPreRelease-Longduration-High-73374-[P2] NodeDisruptionPolicy files with action Reboot [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_nodedisruptionpolicy.go
|
g.It("Author:rioliu-NonPreRelease-Longduration-High-73374-[P2] NodeDisruptionPolicy files with action Reboot [Disruptive]", func() {
testFileBasedPolicy(oc, "73374", []Action{NewCommonAction(NodeDisruptionPolicyActionReboot)}, []string{})
})
| ||||||
test case
|
openshift/openshift-tests-private
|
42cefd95-8be2-4fbd-b4b8-21c75d613871
|
Author:rioliu-NonPreRelease-High-73375-NodeDisruptionPolicy files with action Restart [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_nodedisruptionpolicy.go
|
g.It("Author:rioliu-NonPreRelease-High-73375-NodeDisruptionPolicy files with action Restart [Disruptive]", func() {
testFileBasedPolicy(oc, "73375", []Action{NewRestartAction(TestService)}, []string{LogPerformingPostConfigRestart, LogServiceRestartedSuccessfully})
})
| ||||||
test case
|
openshift/openshift-tests-private
|
1b971fe0-cd8f-406f-9234-72e8281ef51f
|
Author:rioliu-NonPreRelease-High-73378-[P1] NodeDisruptionPolicy files with action Reload [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_nodedisruptionpolicy.go
|
g.It("Author:rioliu-NonPreRelease-High-73378-[P1] NodeDisruptionPolicy files with action Reload [Disruptive]", func() {
testFileBasedPolicy(oc, "73378", []Action{NewReloadAction(TestService)}, []string{LogPerformingPostConfigReload, LogServiceReloadedSuccessfully})
})
| ||||||
test case
|
openshift/openshift-tests-private
|
0fb6b827-71b6-4153-853c-167a5c24ca2c
|
Author:rioliu-NonPreRelease-High-73385-[P2] NodeDisruptionPolicy files with action DaemonReload [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_nodedisruptionpolicy.go
|
g.It("Author:rioliu-NonPreRelease-High-73385-[P2] NodeDisruptionPolicy files with action DaemonReload [Disruptive]", func() {
testFileBasedPolicy(oc, "73385", []Action{NewCommonAction(NodeDisruptionPolicyActionDaemonReload)}, []string{LogPerformingPostConfigDaemonReload, LogDaemonReloadedSuccessfully})
})
| ||||||
test case
|
openshift/openshift-tests-private
|
355d10d6-8002-40f0-96a9-7e9d18940817
|
Author:rioliu-NonPreRelease-Longduration-High-73388-NodeDisruptionPolicy files with action Drain [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_nodedisruptionpolicy.go
|
g.It("Author:rioliu-NonPreRelease-Longduration-High-73388-NodeDisruptionPolicy files with action Drain [Disruptive]", func() {
testFileBasedPolicy(oc, "73388", []Action{NewCommonAction(NodeDisruptionPolicyActionDrain)}, []string{})
})
| ||||||
test case
|
openshift/openshift-tests-private
|
7d20c3cc-3f6f-4051-a333-f4a3f1ead4a5
|
Author:rioliu-NonPreRelease-Longduration-High-73389-[P1] NodeDisruptionPolicy files with multiple actions [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_nodedisruptionpolicy.go
|
g.It("Author:rioliu-NonPreRelease-Longduration-High-73389-[P1] NodeDisruptionPolicy files with multiple actions [Disruptive]", func() {
testFileBasedPolicy(oc, "73389", []Action{
NewCommonAction(NodeDisruptionPolicyActionDrain),
NewCommonAction(NodeDisruptionPolicyActionDaemonReload),
NewReloadAction(TestService),
NewRestartAction(TestService),
}, []string{
LogPerformingPostConfigReload,
LogServiceReloadedSuccessfully,
LogPerformingPostConfigRestart,
LogServiceRestartedSuccessfully,
LogPerformingPostConfigDaemonReload,
LogDaemonReloadedSuccessfully,
})
})
| ||||||
test case
|
openshift/openshift-tests-private
|
3cc4ad56-079f-429d-9572-42431cf5c172
|
Author:rioliu-NonPreRelease-High-73414-[P1] NodeDisruptionPolicy units with action None [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_nodedisruptionpolicy.go
|
g.It("Author:rioliu-NonPreRelease-High-73414-[P1] NodeDisruptionPolicy units with action None [Disruptive]", func() {
testUnitBasedPolicy(oc, "73414", []Action{NewCommonAction(NodeDisruptionPolicyActionNone)}, []string{LogPerformingPostConfigNone})
})
| ||||||
test case
|
openshift/openshift-tests-private
|
89ed2d9e-77d7-4340-96d6-23d541f1bc73
|
Author:rioliu-NonPreRelease-Longduration-High-73413-[P2] NodeDisruptionPolicy units with action Reboot [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_nodedisruptionpolicy.go
|
g.It("Author:rioliu-NonPreRelease-Longduration-High-73413-[P2] NodeDisruptionPolicy units with action Reboot [Disruptive]", func() {
testUnitBasedPolicy(oc, "73413", []Action{NewCommonAction(NodeDisruptionPolicyActionReboot)}, []string{})
})
| ||||||
test case
|
openshift/openshift-tests-private
|
19013f8a-f2e5-4a7b-9ebd-78090f8a94d0
|
Author:rioliu-NonPreRelease-Longduration-High-73411-NodeDisruptionPolicy units with multiple actions [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_nodedisruptionpolicy.go
|
g.It("Author:rioliu-NonPreRelease-Longduration-High-73411-NodeDisruptionPolicy units with multiple actions [Disruptive]", func() {
testUnitBasedPolicy(oc, "73411", []Action{
NewCommonAction(NodeDisruptionPolicyActionDrain),
NewCommonAction(NodeDisruptionPolicyActionDaemonReload),
NewReloadAction(TestService),
NewRestartAction(TestService),
}, []string{
LogPerformingPostConfigReload,
LogServiceReloadedSuccessfully,
LogPerformingPostConfigRestart,
LogServiceRestartedSuccessfully,
LogPerformingPostConfigDaemonReload,
LogDaemonReloadedSuccessfully,
})
})
| ||||||
test case
|
openshift/openshift-tests-private
|
c4ad0d93-d6c7-48f4-ae9d-d72848ffb5b9
|
Author:rioliu-NonPreRelease-High-73417-[P1] NodeDisruptionPolicy sshkey with action None [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_nodedisruptionpolicy.go
|
g.It("Author:rioliu-NonPreRelease-High-73417-[P1] NodeDisruptionPolicy sshkey with action None [Disruptive]", func() {
testSSHKeyBasedPolicy(oc, "73417", []Action{NewCommonAction(NodeDisruptionPolicyActionNone)}, []string{LogPerformingPostConfigNone})
})
| ||||||
test case
|
openshift/openshift-tests-private
|
f4cd8390-35bf-4064-b149-132dad3f6625
|
Author:rioliu-NonPreRelease-Longduration-High-73418-[P2] NodeDisruptionPolicy sshkey with action Reboot [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_nodedisruptionpolicy.go
|
g.It("Author:rioliu-NonPreRelease-Longduration-High-73418-[P2] NodeDisruptionPolicy sshkey with action Reboot [Disruptive]", func() {
testSSHKeyBasedPolicy(oc, "73418", []Action{NewCommonAction(NodeDisruptionPolicyActionReboot)}, []string{})
})
| ||||||
test case
|
openshift/openshift-tests-private
|
33c96108-1669-41a6-bdc0-f39ce8b0f5c5
|
Author:rioliu-NonPreRelease-Longduration-High-73415-NodeDisruptionPolicy sshkey with multiple actions [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_nodedisruptionpolicy.go
|
g.It("Author:rioliu-NonPreRelease-Longduration-High-73415-NodeDisruptionPolicy sshkey with multiple actions [Disruptive]", func() {
testSSHKeyBasedPolicy(oc, "73415", []Action{
NewCommonAction(NodeDisruptionPolicyActionDrain),
NewCommonAction(NodeDisruptionPolicyActionDaemonReload),
NewReloadAction(TestService),
NewRestartAction(TestService),
}, []string{
LogPerformingPostConfigReload,
LogServiceReloadedSuccessfully,
LogPerformingPostConfigRestart,
LogServiceRestartedSuccessfully,
LogPerformingPostConfigDaemonReload,
LogDaemonReloadedSuccessfully,
})
})
| ||||||
test case
|
openshift/openshift-tests-private
|
8b3daae9-404e-41d9-97dd-55d7739d0679
|
Author:rioliu-NonPreRelease-High-73489-[P1] NodeDisruptionPolicy MachineConfigurations is only effective with name cluster
|
['"os"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_nodedisruptionpolicy.go
|
g.It("Author:rioliu-NonPreRelease-High-73489-[P1] NodeDisruptionPolicy MachineConfigurations is only effective with name cluster", func() {
var (
filePath = generateTempFilePath(e2e.TestContext.OutputDir, "invalidmc-*")
fileContent = strings.ReplaceAll(NewNodeDisruptionPolicy(oc).PrettyString(), "cluster", "iminvalid")
)
exutil.By("Create machineconfiguration.operator.openshift.io with invalid name")
o.Expect(os.WriteFile(filePath, []byte(fileContent), 0o644)).NotTo(o.HaveOccurred(), "create invalid MC file failed")
defer os.Remove(filePath)
output, ocerr := oc.AsAdmin().Run("apply").Args("-f", filePath).Output()
exutil.By("Check whether oc command is failed")
o.Expect(ocerr).To(o.HaveOccurred(), "Expected oc command error not found")
o.Expect(output).Should(o.ContainSubstring("Only a single object of MachineConfiguration is allowed and it must be named cluster"))
})
| |||||
test case
|
openshift/openshift-tests-private
|
0db72788-24ee-400f-b3f0-7b3c99ea285c
|
Author:rioliu-NonPreRelease-Longduration-Medium-75109-[P2] NodeDisruptionPolicy files allow paths to be defined for non-disruptive updates [Disruptive]
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_nodedisruptionpolicy.go
|
g.It("Author:rioliu-NonPreRelease-Longduration-Medium-75109-[P2] NodeDisruptionPolicy files allow paths to be defined for non-disruptive updates [Disruptive]", func() {
var (
mcp = GetCompactCompatiblePool(oc.AsAdmin())
node = mcp.GetSortedNodesOrFail()[0]
ndp = NewNodeDisruptionPolicy(oc)
innerDirPath = "/etc/test-file-policy-subdir-75109/extradir/"
innerDirFilePath = innerDirPath + "test-file-inner.txt"
innerDirFileConfig = getURLEncodedFileConfig(innerDirFilePath, "test-75109.txt", "420")
innerDirActions = []Action{NewCommonAction(NodeDisruptionPolicyActionNone)}
innerDirExpectedLogs = []string{LogPerformingPostConfigNone}
innerDirMcName = "test-75109-inner-dir-files"
outerDirPath = "/etc/test-file-policy-subdir-75109"
outerDirFilePath = outerDirPath + "/test-file-outer.txt"
outerDirFileConfig = getURLEncodedFileConfig(outerDirFilePath, "test-75109.txt", "420")
outerDirActions = []Action{NewRestartAction(TestService)}
outerDirExpectedLogs = []string{LogPerformingPostConfigRestart, LogServiceRestartedSuccessfully}
outerDirMcName = "test-75109-outer-dir-files"
filePath = "/etc/test-file-policy-subdir-75109/test-file.txt"
fileConfig = getURLEncodedFileConfig(filePath, "test-75109.txt", "420")
fileActions = []Action{NewReloadAction(TestService)}
fileExpectedLogs = []string{LogPerformingPostConfigReload, LogServiceReloadedSuccessfully}
fileMcName = "test-75109-files"
startTime = node.GetDateOrFail()
mcc = NewController(oc.AsAdmin()).IgnoreLogsBeforeNowOrFail()
)
exutil.By("Patch MachineConfiguration cluster")
defer ndp.Rollback()
o.Expect(
ndp.AddFilePolicy(innerDirPath, innerDirActions...).AddFilePolicy(outerDirPath, outerDirActions...).AddFilePolicy(filePath, fileActions...).Apply(),
).To(o.Succeed(), "Patch MachineConfiguration failed")
logger.Infof("OK!\n")
// Test the behaviour of files created inside the inner directory
exutil.By("Create a test file in the inner directory")
innerMc := NewMachineConfig(oc.AsAdmin(), innerDirMcName, mcp.GetName())
innerMc.SetParams(fmt.Sprintf("FILES=[%s]", innerDirFileConfig))
defer innerMc.delete()
innerMc.create()
logger.Infof("OK!\n")
exutil.By("Check that files inside the inner directory execute the right actions")
checkDrainAndReboot(node, startTime, mcc, innerDirActions)
checkMachineConfigDaemonLog(node, innerDirExpectedLogs)
logger.Infof("OK!\n")
// Test the behaviour of files created inside the outer directory
exutil.By("Create a test file in the outer directory")
startTime = node.GetDateOrFail()
mcc.IgnoreLogsBeforeNowOrFail()
outerMc := NewMachineConfig(oc.AsAdmin(), outerDirMcName, mcp.GetName())
outerMc.SetParams(fmt.Sprintf("FILES=[%s]", outerDirFileConfig))
defer outerMc.Delete()
outerMc.create()
logger.Infof("OK!\n")
exutil.By("Check that files inside the outer directory execute the right actions")
checkDrainAndReboot(node, startTime, mcc, outerDirActions)
checkMachineConfigDaemonLog(node, outerDirExpectedLogs)
logger.Infof("OK!\n")
// Test the behaviour of files created inside the outer directory but with an explicit policy for them
exutil.By("Create a test file inside the outer directory but with an explicitly defined policy")
startTime = node.GetDateOrFail()
mcc.IgnoreLogsBeforeNowOrFail()
fileMc := NewMachineConfig(oc.AsAdmin(), fileMcName, mcp.GetName())
fileMc.SetParams(fmt.Sprintf("FILES=[%s]", fileConfig))
defer fileMc.Delete()
fileMc.create()
logger.Infof("OK!\n")
exutil.By("Check that files with explicit defined policies execute the right actions")
checkDrainAndReboot(node, startTime, mcc, fileActions)
checkMachineConfigDaemonLog(node, fileExpectedLogs)
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
c6e8c8af-2d91-4d9f-8568-260a9c794e47
|
Author:sregidor-NonPreRelease-Longduration-Medium-75110-Propagate a NodeDisruptionPolicy failure condition via degrading the daemon [Disruptive]
|
['"fmt"', '"regexp"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_nodedisruptionpolicy.go
|
g.It("Author:sregidor-NonPreRelease-Longduration-Medium-75110-Propagate a NodeDisruptionPolicy failure condition via degrading the daemon [Disruptive]", func() {
var (
invalidService = "fake.service"
invalidActions = []Action{NewReloadAction(invalidService)}
validActions = []Action{NewReloadAction(TestService)}
mcp = GetCompactCompatiblePool(oc.AsAdmin())
mcName = "mco-tc-75110-failed-node-disruption-policy-action"
filePath = "/etc/test-file-policy-tc-75110-failed-action"
fileContent = "test"
fileConfig = getURLEncodedFileConfig(filePath, fileContent, "420")
expectedNDMessage = regexp.QuoteMeta(fmt.Sprintf("error running systemctl reload %s: Failed to reload %s: Unit %s not found", invalidService, invalidService, invalidService)) // quotemeta to escape regex characters
expectedNDReason = "1 nodes are reporting degraded status on sync"
)
exutil.By("Configure and invalid action")
ndp := NewNodeDisruptionPolicy(oc)
defer ndp.Rollback()
defer mcp.RecoverFromDegraded()
o.Expect(ndp.AddFilePolicy(filePath, invalidActions...).Apply()).To(o.Succeed(), "Patch MachineConfiguration failed")
logger.Infof("OK!\n")
exutil.By("Create a MC using the configured disruption policy")
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker)
mc.SetParams(fmt.Sprintf("FILES=[%s]", fileConfig))
mc.skipWaitForMcp = true
defer mc.deleteNoWait()
mc.create()
logger.Infof("OK!\n")
checkDegraded(mcp, expectedNDMessage, expectedNDReason, "NodeDegraded", false, 1)
exutil.By("Fix the disruption policy configuration")
o.Expect(ndp.AddFilePolicy(filePath, validActions...).Apply()).To(o.Succeed(), "Patch MachineConfiguration failed")
logger.Infof("OK!\n")
exutil.By("Check that the configuration can be applied")
o.Eventually(mcp, "10m", "20s").ShouldNot(BeDegraded(),
"The node disruption policy was fixed but the MCP didn't stop being degraded")
mcp.waitForComplete()
o.Eventually(NewResource(oc.AsAdmin(), "co", "machine-config"), "2m", "20s").ShouldNot(BeDegraded(),
"machine-config CO should not be degraded anymore once the configuration is applied")
o.Eventually(NewRemoteFile(mcp.GetSortedNodesOrFail()[0], filePath)).Should(HaveContent(fileContent),
"The configuration was applied but the deployed file doesn't have the right content")
logger.Infof("OK!\n")
})
| |||||
test
|
openshift/openshift-tests-private
|
17e71e9a-7efc-4cf3-bb07-29ea055b01d7
|
mco_ocb
|
import (
"fmt"
"strings"
"sync"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
)
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_ocb.go
|
package mco
import (
"fmt"
"strings"
"sync"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
)
var _ = g.Describe("[sig-mco] MCO ocb", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("mco-ocb", exutil.KubeConfigPath())
)
g.JustBeforeEach(func() {
preChecks(oc)
// According to https://issues.redhat.com/browse/MCO-831, featureSet:TechPreviewNoUpgrade is required
// xref: featureGate: OnClusterBuild
if !exutil.IsTechPreviewNoUpgrade(oc) {
g.Skip("featureSet: TechPreviewNoUpgrade is required for this test")
}
skipTestIfOCBIsEnabled(oc)
})
g.It("Author:sregidor-NonPreRelease-Medium-79172-OCB Inherit from global pull secret if baseImagePullSecret field is not specified [Disruptive]", func() {
var (
infraMcpName = "infra"
)
exutil.By("Create custom infra MCP")
// We add no workers to the infra pool, it is not necessary
infraMcp, err := CreateCustomMCP(oc.AsAdmin(), infraMcpName, 0)
defer infraMcp.delete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating a new custom pool: %s", infraMcpName)
logger.Infof("OK!\n")
testContainerFile([]ContainerFile{}, MachineConfigNamespace, infraMcp, nil, true)
})
g.It("Author:sregidor-NonPreRelease-High-73494-[P1] OCB Wiring up Productionalized Build Controller. New 4.16 OCB API [Disruptive]", func() {
var (
infraMcpName = "infra"
moscName = "tc-73494-infra"
)
exutil.By("Create custom infra MCP")
// We add no workers to the infra pool, it is not necessary
infraMcp, err := CreateCustomMCP(oc.AsAdmin(), infraMcpName, 0)
defer infraMcp.delete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating a new custom pool: %s", infraMcpName)
logger.Infof("OK!\n")
exutil.By("Configure OCB functionality for the new infra MCP")
mosc, err := CreateMachineOSConfigUsingExternalOrInternalRegistry(oc.AsAdmin(), MachineConfigNamespace, moscName, infraMcpName, nil)
defer mosc.CleanupAndDelete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating the MachineOSConfig resource")
logger.Infof("OK!\n")
ValidateSuccessfulMOSC(mosc, nil)
exutil.By("Remove the MachineOSConfig resource")
o.Expect(mosc.CleanupAndDelete()).To(o.Succeed(), "Error cleaning up %s", mosc)
logger.Infof("OK!\n")
ValidateMOSCIsGarbageCollected(mosc, infraMcp)
exutil.AssertAllPodsToBeReady(oc.AsAdmin(), MachineConfigNamespace)
logger.Infof("OK!\n")
})
g.It("Author:sregidor-NonPreRelease-Medium-73599-[P2] OCB Validate MachineOSConfig. New 41.6 OCB API [Disruptive]", func() {
var (
infraMcpName = "infra"
moscName = "tc-73599-infra"
pushSpec = fmt.Sprintf("%s/openshift-machine-config-operator/ocb-%s-image:latest", InternalRegistrySvcURL, infraMcpName)
pullSecret = NewSecret(oc.AsAdmin(), "openshift-config", "pull-secret")
fakePullSecretName = "fake-pull-secret"
expectedWrongPullSecretMsg = fmt.Sprintf(`could not validate baseImagePullSecret "%s" for MachineOSConfig %s: secret %s from %s is not found. Did you use the right secret name?`,
fakePullSecretName, moscName, fakePullSecretName, moscName)
fakePushSecretName = "fake-push-secret"
expectedWrongPushSecretMsg = fmt.Sprintf(`could not validate renderedImagePushSecret "%s" for MachineOSConfig %s: secret %s from %s is not found. Did you use the right secret name?`,
fakePushSecretName, moscName, fakePushSecretName, moscName)
fakeBuilderType = "FakeBuilderType"
expectedWrongBuilderTypeMsg = fmt.Sprintf(`Unsupported value: "%s": supported values: "Job"`, fakeBuilderType)
)
exutil.By("Create custom infra MCP")
// We add no workers to the infra pool, it is not necessary
infraMcp, err := CreateCustomMCP(oc.AsAdmin(), infraMcpName, 0)
defer infraMcp.delete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating a new custom pool: %s", infraMcpName)
logger.Infof("OK!\n")
exutil.By("Clone the pull-secret in MCO namespace")
clonedSecret, err := CloneResource(pullSecret, "cloned-pull-secret-"+exutil.GetRandomString(), MachineConfigNamespace, nil)
defer clonedSecret.Delete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error duplicating the cluster's pull-secret in MCO namespace")
logger.Infof("OK!\n")
// Check behaviour when wrong pullSecret
checkMisconfiguredMOSC(oc.AsAdmin(), moscName, infraMcpName, fakePullSecretName, clonedSecret.GetName(), pushSpec, nil,
expectedWrongPullSecretMsg,
"Check that MOSC using wrong pull secret are failing as expected")
// Check behaviour when wrong pushSecret
checkMisconfiguredMOSC(oc.AsAdmin(), moscName, infraMcpName, clonedSecret.GetName(), fakePushSecretName, pushSpec, nil,
expectedWrongPushSecretMsg,
"Check that MOSC using wrong push secret are failing as expected")
// Try to create a MOSC with a wrong pushSpec
logger.Infof("Create a MachineOSConfig resource with a wrong builder type")
err = NewMCOTemplate(oc, "generic-machine-os-config.yaml").Create("-p", "NAME="+moscName, "POOL="+infraMcpName, "PULLSECRET="+clonedSecret.GetName(),
"PUSHSECRET="+clonedSecret.GetName(), "PUSHSPEC="+pushSpec, "IMAGEBUILDERTYPE="+fakeBuilderType)
o.Expect(err).To(o.HaveOccurred(), "Expected oc command to fail, but it didn't")
o.Expect(err).To(o.BeAssignableToTypeOf(&exutil.ExitError{}), "Unexpected error while creating the new MOSC")
o.Expect(err.(*exutil.ExitError).StdErr).To(o.ContainSubstring(expectedWrongBuilderTypeMsg),
"MSOC creation using wrong image type builder should be forbidden")
logger.Infof("OK!")
})
g.It("Author:ptalgulk-ConnectedOnly-Longduration-NonPreRelease-Critical-74645-Panic Condition for Non-Matching MOSC Resources [Disruptive]", func() {
var (
infraMcpName = "infra"
moscName = "tc-74645"
mcc = NewController(oc.AsAdmin())
)
exutil.By("Create New Custom MCP")
defer DeleteCustomMCP(oc.AsAdmin(), infraMcpName)
infraMcp, err := CreateCustomMCP(oc.AsAdmin(), infraMcpName, 1)
o.Expect(err).NotTo(o.HaveOccurred(), "Could not create a new custom MCP")
node := infraMcp.GetNodesOrFail()[0]
logger.Infof("%s", node.GetName())
logger.Infof("OK!\n")
exutil.By("Configure OCB functionality for the new infra MCP")
mosc, err := CreateMachineOSConfigUsingExternalOrInternalRegistry(oc.AsAdmin(), MachineConfigNamespace, moscName, infraMcpName, nil)
defer DisableOCL(mosc)
// remove after this bug is fixed OCPBUGS-36810
defer func() {
logger.Infof("Configmaps should also be deleted ")
cmList := NewConfigMapList(mosc.GetOC(), MachineConfigNamespace).GetAllOrFail()
for _, cm := range cmList {
if strings.Contains(cm.GetName(), "rendered-") {
o.Expect(cm.Delete()).Should(o.Succeed(), "The ConfigMap related to MOSC has not been removed")
}
}
}()
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating the MachineOSConfig resource")
logger.Infof("OK!\n")
exutil.By("Check that a new build has been triggered")
o.Eventually(mosc.GetCurrentMachineOSBuild, "5m", "20s").Should(Exist(),
"No build was created when OCB was enabled")
mosb, err := mosc.GetCurrentMachineOSBuild()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting MOSB from MOSC")
o.Eventually(mosb.GetJob, "5m", "20s").Should(Exist(),
"No build pod was created when OCB was enabled")
o.Eventually(mosb, "5m", "20s").Should(HaveConditionField("Building", "status", TrueString),
"MachineOSBuild didn't report that the build has begun")
logger.Infof("OK!\n")
exutil.By("Delete the MCOS and check it is deleted")
o.Expect(mosc.CleanupAndDelete()).To(o.Succeed(), "Error cleaning up %s", mosc)
ValidateMOSCIsGarbageCollected(mosc, infraMcp)
o.Expect(mosb).NotTo(Exist(), "Build is not deleted")
o.Expect(mosc).NotTo(Exist(), "MOSC is not deleted")
logger.Infof("OK!\n")
exutil.By("Check MCC Logs for Panic is not produced")
exutil.AssertAllPodsToBeReady(oc.AsAdmin(), MachineConfigNamespace)
mccPrevLogs, err := mcc.GetPreviousLogs()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting previous MCC logs")
o.Expect(mccPrevLogs).NotTo(o.Or(o.ContainSubstring("panic"), o.ContainSubstring("Panic")), "Panic is seen in MCC previous logs after deleting OCB resources:\n%s", mccPrevLogs)
mccLogs, err := mcc.GetLogs()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting MCC logs")
o.Expect(mccLogs).NotTo(o.Or(o.ContainSubstring("panic"), o.ContainSubstring("Panic")), "Panic is seen in MCC logs after deleting OCB resources:\n%s", mccLogs)
logger.Infof("OK!\n")
})
g.It("Author:sregidor-ConnectedOnly-Longduration-NonPreRelease-Critical-73496-[P1] OCB use custom Containerfile. New 4.16 OCB API[Disruptive]", func() {
var (
mcp = GetCompactCompatiblePool(oc.AsAdmin())
containerFileContent = `
# Pull the centos base image and enable the EPEL repository.
FROM quay.io/centos/centos:stream9 AS centos
RUN dnf install -y epel-release
# Pull an image containing the yq utility.
FROM quay.io/multi-arch/yq:4.25.3 AS yq
# Build the final OS image for this MachineConfigPool.
FROM configs AS final
# Copy the EPEL configs into the final image.
COPY --from=yq /usr/bin/yq /usr/bin/yq
COPY --from=centos /etc/yum.repos.d /etc/yum.repos.d
COPY --from=centos /etc/pki/rpm-gpg/RPM-GPG-KEY-* /etc/pki/rpm-gpg/
# Install cowsay and ripgrep from the EPEL repository into the final image,
# along with a custom cow file.
RUN sed -i 's/\$stream/9-stream/g' /etc/yum.repos.d/centos*.repo && \
rpm-ostree install cowsay ripgrep
`
checkers = []Checker{
CommandOutputChecker{
Command: []string{"cowsay", "-t", "hello"},
Matcher: o.ContainSubstring("< hello >"),
ErrorMsg: fmt.Sprintf("Cowsay is not working after installing the new image"),
Desc: fmt.Sprintf("Check that cowsay is installed and working"),
},
}
)
testContainerFile([]ContainerFile{{Content: containerFileContent}}, MachineConfigNamespace, mcp, checkers, false)
})
g.It("Author:sregidor-ConnectedOnly-Longduration-NonPreRelease-Medium-78001-[P2] The etc-pki-etitlement secret is created automatically for OCB Use custom Containerfile with rhel enablement [Disruptive]", func() {
var (
entitlementSecret = NewSecret(oc.AsAdmin(), "openshift-config-managed", "etc-pki-entitlement")
containerFileContent = `
FROM configs AS final
RUN rm -rf /etc/rhsm-host && \
rpm-ostree install buildah && \
ln -s /run/secrets/rhsm /etc/rhsm-host && \
ostree container commit
`
checkers = []Checker{
CommandOutputChecker{
Command: []string{"rpm", "-q", "buildah"},
Matcher: o.ContainSubstring("buildah-"),
ErrorMsg: fmt.Sprintf("Buildah package is not installed after the image was deployed"),
Desc: fmt.Sprintf("Check that buildah is installed"),
},
}
mcp = GetCompactCompatiblePool(oc.AsAdmin())
)
if !entitlementSecret.Exists() {
g.Skip(fmt.Sprintf("There is no entitlement secret available in this cluster %s. This test case cannot be executed", entitlementSecret))
}
testContainerFile([]ContainerFile{{Content: containerFileContent}}, MachineConfigNamespace, mcp, checkers, false)
})
g.It("Author:sregidor-ConnectedOnly-Longduration-NonPreRelease-High-73947-OCB use OutputImage CurrentImagePullSecret [Disruptive]", func() {
var (
mcp = GetCompactCompatiblePool(oc.AsAdmin())
tmpNamespaceName = "tc-73947-mco-ocl-images"
checkers = []Checker{
CommandOutputChecker{
Command: []string{"rpm-ostree", "status"},
Matcher: o.ContainSubstring(fmt.Sprintf("%s/%s/ocb-%s-image", InternalRegistrySvcURL, tmpNamespaceName, mcp.GetName())),
ErrorMsg: fmt.Sprintf("The nodes are not using the expected OCL image stored in the internal registry"),
Desc: fmt.Sprintf("Check that the nodes are using the right OS image"),
},
}
)
testContainerFile([]ContainerFile{}, tmpNamespaceName, mcp, checkers, false)
})
g.It("Author:sregidor-ConnectedOnly-Longduration-NonPreRelease-High-72003-[P1] OCB Opting into on-cluster builds must respect maxUnavailable setting. Workers.[Disruptive]", func() {
SkipIfSNO(oc.AsAdmin()) // This test makes no sense in SNO
var (
moscName = "test-" + GetCurrentTestPolarionIDNumber()
wMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
workerNodes = wMcp.GetSortedNodesOrFail()
)
exutil.By("Configure maxUnavailable if worker pool has more than 2 nodes")
if len(workerNodes) > 2 {
wMcp.SetMaxUnavailable(2)
defer wMcp.RemoveMaxUnavailable()
}
maxUnavailable := exutil.OrFail[int](wMcp.GetMaxUnavailableInt())
logger.Infof("Current maxUnavailable value %d", maxUnavailable)
logger.Infof("OK!\n")
exutil.By("Configure OCB functionality for the new worker MCP")
mosc, err := CreateMachineOSConfigUsingExternalOrInternalRegistry(oc.AsAdmin(), MachineConfigNamespace, moscName, wMcp.GetName(), nil)
defer DisableOCL(mosc)
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating the MachineOSConfig resource")
logger.Infof("OK!\n")
exutil.By("Configure OCB functionality for the new worker MCP")
o.Eventually(wMcp.GetUpdatingStatus, "15m", "15s").Should(o.Equal("True"),
"The worker MCP did not start updating")
logger.Infof("OK!\n")
exutil.By("Poll the nodes sorted by the order they are updated")
updatedNodes := wMcp.GetSortedUpdatedNodes(maxUnavailable)
for _, n := range updatedNodes {
logger.Infof("updated node: %s created: %s zone: %s", n.GetName(), n.GetOrFail(`{.metadata.creationTimestamp}`), n.GetOrFail(`{.metadata.labels.topology\.kubernetes\.io/zone}`))
}
logger.Infof("OK!\n")
exutil.By("Wait for the configuration to be applied in all nodes")
wMcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Check that nodes were updated in the right order")
rightOrder := checkUpdatedLists(workerNodes, updatedNodes, maxUnavailable)
o.Expect(rightOrder).To(o.BeTrue(), "Expected update order %s, but found order %s", workerNodes, updatedNodes)
logger.Infof("OK!\n")
exutil.By("Remove the MachineOSConfig resource")
o.Expect(DisableOCL(mosc)).To(o.Succeed(), "Error cleaning up %s", mosc)
logger.Infof("OK!\n")
})
g.It("Author:sregidor-ConnectedOnly-Longduration-NonPreRelease-High-73497-[P2] OCB build images in many MCPs at the same time [Disruptive]", func() {
SkipIfCompactOrSNO(oc.AsAdmin()) // This test makes no sense in SNO or compact
var (
customMCPNames = "infra"
numCustomPools = 5
moscList = []*MachineOSConfig{}
mcpList = []*MachineConfigPool{}
wg sync.WaitGroup
)
exutil.By("Create custom MCPS")
for i := 0; i < numCustomPools; i++ {
infraMcpName := fmt.Sprintf("%s-%d", customMCPNames, i)
infraMcp, err := CreateCustomMCP(oc.AsAdmin(), infraMcpName, 0)
defer infraMcp.delete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating a new custom pool: %s", infraMcpName)
mcpList = append(mcpList, infraMcp)
}
logger.Infof("OK!\n")
exutil.By("Checking that all MOSCs were executed properly")
for _, infraMcp := range mcpList {
moscName := fmt.Sprintf("mosc-%s", infraMcp.GetName())
mosc, err := CreateMachineOSConfigUsingExternalOrInternalRegistry(oc.AsAdmin(), MachineConfigNamespace, moscName, infraMcp.GetName(), nil)
defer mosc.CleanupAndDelete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating the MachineOSConfig resource")
moscList = append(moscList, mosc)
wg.Add(1)
go func() {
defer g.GinkgoRecover()
defer wg.Done()
ValidateSuccessfulMOSC(mosc, nil)
}()
}
wg.Wait()
logger.Infof("OK!\n")
exutil.By("Removing all MOSC resources")
for _, mosc := range moscList {
o.Expect(mosc.CleanupAndDelete()).To(o.Succeed(), "Error cleaning up %s", mosc)
}
logger.Infof("OK!\n")
exutil.By("Validate that all resources were garbage collected")
for i := 0; i < numCustomPools; i++ {
ValidateMOSCIsGarbageCollected(moscList[i], mcpList[i])
}
logger.Infof("OK!\n")
exutil.AssertAllPodsToBeReady(oc.AsAdmin(), MachineConfigNamespace)
logger.Infof("OK!\n")
})
g.It("Author:sregidor-Longduration-NonPreRelease-High-77498-OCB Trigger new build when renderedImagePushspec is updated [Disruptive]", func() {
var (
infraMcpName = "infra"
moscName = "tc-77498-infra"
)
exutil.By("Create custom infra MCP")
// We add no workers to the infra pool, it is not necessary
infraMcp, err := CreateCustomMCP(oc.AsAdmin(), infraMcpName, 0)
defer infraMcp.delete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating a new custom pool: %s", infraMcpName)
logger.Infof("OK!\n")
exutil.By("Configure OCB functionality for the new infra MCP")
mosc, err := CreateMachineOSConfigUsingExternalOrInternalRegistry(oc.AsAdmin(), MachineConfigNamespace, moscName, infraMcpName, nil)
defer mosc.CleanupAndDelete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating the MachineOSConfig resource")
logger.Infof("OK!\n")
ValidateSuccessfulMOSC(mosc, nil)
exutil.By("Set a new rendered image pull spec")
initialMOSB := exutil.OrFail[*MachineOSBuild](mosc.GetCurrentMachineOSBuild())
initialRIPS := exutil.OrFail[string](mosc.GetRenderedImagePushspec())
o.Expect(
mosc.SetRenderedImagePushspec(strings.ReplaceAll(initialRIPS, "ocb-", "ocb77498-")),
).NotTo(o.HaveOccurred(), "Error patching %s to set the new renderedImagePullSpec", mosc)
logger.Infof("OK!\n")
exutil.By("Check that a new build is triggered")
checkNewBuildIsTriggered(mosc, initialMOSB)
logger.Infof("OK!\n")
exutil.By("Set the original rendered image pull spec")
o.Expect(
mosc.SetRenderedImagePushspec(initialRIPS),
).NotTo(o.HaveOccurred(), "Error patching %s to set the new renderedImagePullSpec", mosc)
logger.Infof("OK!\n")
exutil.By("Check that the initial build is reused")
var currentMOSB *MachineOSBuild
o.Eventually(func() (string, error) {
currentMOSB, err = mosc.GetCurrentMachineOSBuild()
return currentMOSB.GetName(), err
}, "5m", "20s").Should(o.Equal(initialMOSB.GetName()),
"When the containerfiles were removed and initial MOSC configuration was restored, the initial MOSB was not used")
logger.Infof("OK!\n")
})
g.It("Author:sregidor-Longduration-NonPreRelease-High-77497-OCB Trigger new build when Containerfile is updated [Disruptive]", func() {
var (
infraMcpName = "infra"
moscName = "tc-77497-infra"
containerFile = ContainerFile{Content: "RUN touch /etc/test-add-containerfile"}
containerFileMod = ContainerFile{Content: "RUN touch /etc/test-modified-containerfile"}
)
exutil.By("Create custom infra MCP")
// We add no workers to the infra pool, it is not necessary
infraMcp, err := CreateCustomMCP(oc.AsAdmin(), infraMcpName, 0)
defer infraMcp.delete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating a new custom pool: %s", infraMcpName)
logger.Infof("OK!\n")
exutil.By("Configure OCB functionality for the new infra MCP")
mosc, err := CreateMachineOSConfigUsingExternalOrInternalRegistry(oc.AsAdmin(), MachineConfigNamespace, moscName, infraMcpName, nil)
defer mosc.CleanupAndDelete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating the MachineOSConfig resource")
logger.Infof("OK!\n")
ValidateSuccessfulMOSC(mosc, nil)
exutil.By("Add new container file")
initialMOSB := exutil.OrFail[*MachineOSBuild](mosc.GetCurrentMachineOSBuild())
o.Expect(
mosc.SetContainerfiles([]ContainerFile{containerFile}),
).NotTo(o.HaveOccurred(), "Error patching %s to add a container file", mosc)
logger.Infof("OK!\n")
exutil.By("Check that a new build is triggered when a containerfile is added")
checkNewBuildIsTriggered(mosc, initialMOSB)
logger.Infof("OK!\n")
exutil.By("Modify the container file")
currentMOSB := exutil.OrFail[*MachineOSBuild](mosc.GetCurrentMachineOSBuild())
o.Expect(
mosc.SetContainerfiles([]ContainerFile{containerFileMod}),
).NotTo(o.HaveOccurred(), "Error patching %s to modify an existing container file", mosc)
logger.Infof("OK!\n")
exutil.By("Check that a new build is triggered when a containerfile is modified")
checkNewBuildIsTriggered(mosc, currentMOSB)
logger.Infof("OK!\n")
exutil.By("Remove the container files")
o.Expect(
mosc.RemoveContainerfiles(),
).NotTo(o.HaveOccurred(), "Error patching %s to remove the configured container files", mosc)
logger.Infof("OK!\n")
exutil.By("Check that the initial build is reused")
o.Eventually(func() (string, error) {
currentMOSB, err = mosc.GetCurrentMachineOSBuild()
return currentMOSB.GetName(), err
}, "5m", "20s").Should(o.Equal(initialMOSB.GetName()),
"When the containerfiles were removed and initial MOSC configuration was restored, the initial MOSB was not used")
logger.Infof("OK!\n")
})
g.It("Author:sregidor-Longduration-NonPreRelease-High-77576-In OCB. Create a new MC while a build is running [Disruptive]", func() {
var (
mcp = GetCompactCompatiblePool(oc.AsAdmin())
node = mcp.GetSortedNodesOrFail()[0]
moscName = "test-" + mcp.GetName() + "-" + GetCurrentTestPolarionIDNumber()
fileMode = "0644" // decimal 420
filePath = "/etc/test-77576"
fileContent = "test file"
mcName = "tc-77576-testfile"
fileConfig = getBase64EncodedFileConfig(filePath, fileContent, fileMode)
mc = NewMachineConfig(oc.AsAdmin(), mcName, mcp.GetName())
)
exutil.By("Configure OCB functionality for the new worker MCP")
mosc, err := CreateMachineOSConfigUsingExternalOrInternalRegistry(oc.AsAdmin(), MachineConfigNamespace, moscName, mcp.GetName(), nil)
defer DisableOCL(mosc)
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating the MachineOSConfig resource")
logger.Infof("OK!\n")
exutil.By("Check that a new build has been triggered and is building")
var mosb *MachineOSBuild
o.Eventually(func() (*MachineOSBuild, error) {
var err error
mosb, err = mosc.GetCurrentMachineOSBuild()
return mosb, err
}, "5m", "20s").Should(Exist(),
"No build was created when OCB was enabled")
o.Eventually(mosb.GetJob, "5m", "20s").Should(Exist(),
"No build job was created when OCB was enabled")
o.Eventually(mosb, "5m", "20s").Should(HaveConditionField("Building", "status", TrueString),
"MachineOSBuild didn't report that the build has begun")
logger.Infof("OK!\n")
exutil.By("Create a MC to trigger a new build")
defer mc.delete()
err = mc.Create("-p", "NAME="+mcName, "-p", "POOL="+mcp.GetName(), "-p", fmt.Sprintf("FILES=[%s]", fileConfig))
o.Expect(err).NotTo(o.HaveOccurred())
logger.Infof("OK!\n")
exutil.By("Check that a new build is triggered and the old build is removed")
checkNewBuildIsTriggered(mosc, mosb)
o.Eventually(mosb, "2m", "20s").ShouldNot(Exist(), "The old MOSB %s was not deleted", mosb)
logger.Infof("OK!\n")
exutil.By("Wait for the configuration to be applied")
mcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Check that the MC was applied")
rf := NewRemoteFile(node, filePath)
o.Eventually(rf, "2m", "20s").Should(HaveContent(o.Equal(fileContent)),
"%s doesn't have the right content", rf)
logger.Infof("OK!\n")
exutil.By("Remove the MachineOSConfig resource")
o.Expect(DisableOCL(mosc)).To(o.Succeed(), "Error cleaning up %s", mosc)
logger.Infof("OK!\n")
})
g.It("Author:sregidor-Longduration-NonPreRelease-High-77781-OCB Rebuild a successful build [Disruptive]", func() {
var (
mcp = GetCompactCompatiblePool(oc.AsAdmin())
moscName = "test-" + mcp.GetName() + "-" + GetCurrentTestPolarionIDNumber()
)
exutil.By("Configure OCB functionality for the new worker MCP")
mosc, err := CreateMachineOSConfigUsingExternalOrInternalRegistry(oc.AsAdmin(), MachineConfigNamespace, moscName, mcp.GetName(), nil)
defer DisableOCL(mosc)
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating the MachineOSConfig resource")
logger.Infof("OK!\n")
ValidateSuccessfulMOSC(mosc, nil)
// rebuild the image and check that the image is properly applied in the nodes
RebuildImageAndCheck(mosc)
exutil.By("Remove the MachineOSConfig resource")
o.Expect(DisableOCL(mosc)).To(o.Succeed(), "Error cleaning up %s", mosc)
logger.Infof("OK!\n")
})
g.It("Author:sregidor-Longduration-NonPreRelease-High-77782-[P2] OCB Rebuild an interrupted build [Disruptive]", func() {
var (
mcp = GetCompactCompatiblePool(oc.AsAdmin())
moscName = "test-" + mcp.GetName() + "-" + GetCurrentTestPolarionIDNumber()
)
exutil.By("Configure OCB functionality for the new worker MCP")
mosc, err := CreateMachineOSConfigUsingExternalOrInternalRegistry(oc.AsAdmin(), MachineConfigNamespace, moscName, mcp.GetName(), nil)
defer DisableOCL(mosc)
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating the MachineOSConfig resource")
logger.Infof("OK!\n")
exutil.By("Wait until MOSB starts building")
var mosb *MachineOSBuild
var job *Job
o.Eventually(func() (*MachineOSBuild, error) {
var err error
mosb, err = mosc.GetCurrentMachineOSBuild()
return mosb, err
}, "5m", "20s").Should(Exist(),
"No build was created when OCB was enabled")
o.Eventually(func() (*Job, error) {
var err error
job, err = mosb.GetJob()
return job, err
}, "5m", "20s").Should(Exist(),
"No build job was created when OCB was enabled")
o.Eventually(mosb, "5m", "20s").Should(HaveConditionField("Building", "status", TrueString),
"MachineOSBuild didn't report that the build has begun")
logger.Infof("OK!\n")
exutil.By("Interrupt the build")
o.Expect(job.Delete()).To(o.Succeed(),
"Error deleting %s", job)
o.Eventually(mosb, "5m", "20s").Should(HaveConditionField("Interrupted", "status", TrueString),
"MachineOSBuild didn't report that the build has begun")
logger.Infof("OK!\n")
// TODO: what's the intended MCP status when a build is interrupted? We need to check this status here
// rebuild the image and check that the image is properly applied in the nodes
RebuildImageAndCheck(mosc)
exutil.By("Remove the MachineOSConfig resource")
o.Expect(DisableOCL(mosc)).To(o.Succeed(), "Error cleaning up %s", mosc)
logger.Infof("OK!\n")
})
g.It("Author:ptalgulk-ConnectedOnly-Longduration-NonPreRelease-Medium-77977-Install extension after OCB is enabled [Disruptive]", func() {
var (
moscName = "test-" + GetCurrentTestPolarionIDNumber()
mcp = GetCompactCompatiblePool(oc.AsAdmin())
node = mcp.GetSortedNodesOrFail()[0]
mcName = "test-install-extension-" + GetCurrentTestPolarionIDNumber()
)
exutil.By("Configure OCB functionality for the new worker MCP")
mosc, err := CreateMachineOSConfigUsingExternalOrInternalRegistry(oc.AsAdmin(), MachineConfigNamespace, moscName, mcp.GetName(), nil)
defer DisableOCL(mosc)
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating the MachineOSConfig resource")
logger.Infof("OK!\n")
ValidateSuccessfulMOSC(mosc, nil)
exutil.By("Create a MC")
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker)
mc.SetParams(`EXTENSIONS=["usbguard"]`)
defer mc.delete()
mc.create()
exutil.By("Wait for the configuration to be applied")
mcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Verify worker node includes usbguard extenstion")
o.Expect(
node.DebugNodeWithChroot("rpm", "-q", "usbguard"),
).Should(o.ContainSubstring("usbguard-"), "usbguard has not been installed")
logger.Infof("OK!\n")
exutil.By("Delete a MC.")
mc.delete()
logger.Infof("OK!\n")
exutil.By("Remove the MachineOSConfig resource")
o.Expect(DisableOCL(mosc)).To(o.Succeed(), "Error cleaning up %s", mosc)
ValidateMOSCIsGarbageCollected(mosc, mcp)
logger.Infof("OK!\n")
})
g.It("Author:ptalgulk-ConnectedOnly-Longduration-NonPreRelease-Medium-78196-Verify for etc-pki-etitlement secret is removed for OCB rhel enablement [Disruptive]", func() {
var (
entitlementSecret = NewSecret(oc.AsAdmin(), "openshift-config-managed", "etc-pki-entitlement")
containerFileContent = `
FROM configs AS final
RUN rm -rf /etc/rhsm-host && \
rpm-ostree install buildah && \
ln -s /run/secrets/rhsm /etc/rhsm-host && \
ostree container commit
`
mcp = GetCompactCompatiblePool(oc.AsAdmin())
)
if !entitlementSecret.Exists() {
g.Skip(fmt.Sprintf("There is no entitlement secret available in this cluster %s. This test case cannot be executed", entitlementSecret))
}
exutil.By("Copy the entitlement secret in MCO namespace")
mcoEntitlementSecret, err := CloneResource(entitlementSecret, "etc-pki-entitlement", MachineConfigNamespace, nil)
defer mcoEntitlementSecret.Delete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error copying %s to the %s namespace", mcoEntitlementSecret, MachineConfigNamespace)
logger.Infof("OK!\n")
exutil.By("Delete the entitlement secret in the openshift-config-managed namespace")
defer func() {
exutil.By("Recover the entitlement secret in the openshift-config-managed namespace")
recoverSecret, err := CloneResource(mcoEntitlementSecret, "etc-pki-entitlement", "openshift-config-managed", nil)
o.Expect(err).NotTo(o.HaveOccurred(), "Error copying %s to the openshift-config-managed namespace", entitlementSecret)
o.Expect(recoverSecret).To(Exist(), "Unable to recover the entitlement secret in openshift-config-managed namespace")
}()
entitlementSecret.Delete()
logger.Infof("OK!\n")
exutil.By("Create the MOSC")
mosc, err := CreateMachineOSConfigUsingExternalOrInternalRegistry(oc.AsAdmin(), MachineConfigNamespace, "test-78196-mosc", mcp.GetName(), []ContainerFile{{Content: containerFileContent}})
defer DisableOCL(mosc)
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating the MachineOSConfig resource")
logger.Infof("OK!\n")
exutil.By("Check that a new build has been triggered")
o.Eventually(mosc.GetCurrentMachineOSBuild, "5m", "20s").Should(Exist(),
"No build was created when OCB was enabled")
mosb, err := mosc.GetCurrentMachineOSBuild()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting MOSB from MOSC")
o.Eventually(mosb, "5m", "20s").Should(HaveConditionField("Building", "status", TrueString),
"MachineOSBuild didn't report that the build has begun")
logger.Infof("OK!\n")
exutil.By("Verify the error is produced in buildPod")
exutil.AssertAllPodsToBeReady(oc.AsAdmin(), MachineConfigNamespace)
logger.Infof("OK!\n")
job, err := mosb.GetJob()
o.Expect(err).NotTo(o.HaveOccurred())
// Currently this kind of resources are leaked. Until the leak is fixed we need to make sure that this job is removed
// because its pods are in "Error" status and there are other test cases checking that no pod is reporting any error.
// TODO: remove this once the leak is fixed
defer job.Delete()
logger.Infof("OK!\n")
o.Eventually(job.Logs, "5m", "10s").Should(o.ContainSubstring("Found 0 entitlement certificates"), "Error getting the logs")
o.Eventually(job, "15m", "20s").Should(HaveConditionField("Failed", "status", TrueString), "Job didn't fail")
logger.Infof("OK!\n")
exutil.By("Remove the MachineOSConfig resource")
o.Expect(DisableOCL(mosc)).To(o.Succeed(), "Error cleaning up %s", mosc)
ValidateMOSCIsGarbageCollected(mosc, mcp)
logger.Infof("OK!\n")
})
})
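// testContainerFile enables OCB for the given MCP using the provided container files, checks that the resulting image is
// built and applied in the nodes (running the provided checkers), and finally disables OCB and verifies that all OCB
// resources are garbage collected. When imageNamespace is not the MCO namespace, the image is pushed to the internal
// registry using that namespace.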
func testContainerFile(containerFiles []ContainerFile, imageNamespace string, mcp *MachineConfigPool, checkers []Checker, defaultPullSecret bool) {
var (
oc = mcp.GetOC().AsAdmin()
mcpList = NewMachineConfigPoolList(oc.AsAdmin())
moscName = "test-" + GetCurrentTestPolarionIDNumber()
mosc *MachineOSConfig
err error
)
switch imageNamespace {
case MachineConfigNamespace:
exutil.By("Configure OCB functionality for the new infra MCP. Create MOSC")
mosc, err = createMachineOSConfigUsingExternalOrInternalRegistry(oc, MachineConfigNamespace, moscName, mcp.GetName(), containerFiles, defaultPullSecret)
default:
SkipTestIfCannotUseInternalRegistry(mcp.GetOC())
exutil.By("Capture the current pull-secret value")
// We don't use the pullSecret resource directly, instead we use auxiliary functions that will
// extract and restore the secret's values using a file. That way we can recover the value of the pull-secret
// if our execution goes wrong, without printing it in the logs (for security reasons).
secretFile, sErr := getPullSecret(oc)
o.Expect(sErr).NotTo(o.HaveOccurred(), "Error getting the pull-secret")
logger.Debugf("Pull-secret content stored in file %s", secretFile)
defer func() {
logger.Infof("Restoring initial pull-secret value")
output, sErr := setDataForPullSecret(oc, secretFile)
if sErr != nil {
logger.Errorf("Error restoring the pull-secret's value. Error: %s\nOutput: %s", err, output)
}
mcpList.waitForComplete()
}()
logger.Infof("OK!\n")
exutil.By("Create namespace to store the osImage")
tmpNamespace := NewResource(oc.AsAdmin(), "ns", imageNamespace)
if !tmpNamespace.Exists() {
defer tmpNamespace.Delete()
o.Expect(oc.AsAdmin().WithoutNamespace().Run("new-project").Args(tmpNamespace.GetName(), "--skip-config-write").Execute()).To(o.Succeed(), "Error creating a new project to store the OCL images")
}
logger.Infof("OK!\n")
exutil.By("Configure OCB functionality for the new infra MCP. Create MOSC")
mosc, err = CreateMachineOSConfigUsingInternalRegistry(oc, tmpNamespace.GetName(), moscName, mcp.GetName(), containerFiles, defaultPullSecret)
}
defer DisableOCL(mosc)
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating the MachineOSConfig resource")
logger.Infof("OK!\n")
verifyEntitlementSecretIsPresent(oc.AsAdmin(), mcp)
ValidateSuccessfulMOSC(mosc, checkers)
exutil.By("Remove the MachineOSConfig resource")
o.Expect(DisableOCL(mosc)).To(o.Succeed(), "Error cleaning up %s", mosc)
logger.Infof("OK!\n")
ValidateMOSCIsGarbageCollected(mosc, mcp)
}
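// skipTestIfOCBIsEnabled skips the test case if any MachineOSConfig resource already exists in the cluster,
// since these tests require OCB to be disabled in all pools.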
func skipTestIfOCBIsEnabled(oc *exutil.CLI) {
moscl := NewMachineOSConfigList(oc)
allMosc := moscl.GetAllOrFail()
if len(allMosc) != 0 {
moscl.PrintDebugCommand()
g.Skip(fmt.Sprintf("To run this test case we need that OCB is not enabled in any pool. At least %s OBC is enabled in this cluster.", allMosc[0]))
}
}
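// checkMisconfiguredMOSC creates a misconfigured MachineOSConfig, verifies that the machine-config ClusterOperator becomes
// degraded with the expected message, and checks that the degraded condition is cleared once the offending MOSC is deleted.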
func checkMisconfiguredMOSC(oc *exutil.CLI, moscName, poolName, baseImagePullSecret, renderedImagePushSecret, pushSpec string, containerFile []ContainerFile,
expectedMsg, stepMsg string) {
var (
machineConfigCO = NewResource(oc.AsAdmin(), "co", "machine-config")
)
exutil.By(stepMsg)
defer logger.Infof("OK!\n")
logger.Infof("Create a misconfiugred MOSC")
mosc, err := CreateMachineOSConfig(oc, moscName, poolName, baseImagePullSecret, renderedImagePushSecret, pushSpec, containerFile)
defer mosc.Delete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating MOSC with wrong pull secret")
logger.Infof("OK!")
logger.Infof("Expect machine-config CO to be degraded")
o.Eventually(machineConfigCO, "5m", "20s").Should(BeDegraded(),
"%s should be degraded when a MOSC is configured with a wrong pull secret", machineConfigCO)
o.Eventually(machineConfigCO, "1m", "20s").Should(HaveConditionField("Degraded", "message", o.ContainSubstring(expectedMsg)),
"%s should be degraded when a MOSC is configured with a wrong pull secret", machineConfigCO)
logger.Infof("OK!")
logger.Infof("Delete the offending MOSC")
o.Expect(mosc.Delete()).To(o.Succeed(), "Error deleing the offendint MOSC %s", mosc)
logger.Infof("OK!")
logger.Infof("CHeck that machine-config CO is not degraded anymore")
o.Eventually(machineConfigCO, "5m", "20s").ShouldNot(BeDegraded(),
"%s should stop being degraded when the offending MOSC is deleted", machineConfigCO)
}
// ValidateMOSCIsGarbageCollected makes sure that all resources related to the provided MOSC have been removed
func ValidateMOSCIsGarbageCollected(mosc *MachineOSConfig, mcp *MachineConfigPool) {
exutil.By("Check that the OCB resources are cleaned up")
logger.Infof("Validating that MOSB resources were garbage collected")
NewMachineOSBuildList(mosc.GetOC()).PrintDebugCommand() // for debugging purposes
o.Eventually(mosc.GetMachineOSBuildList, "2m", "20s").Should(o.HaveLen(0), "MachineOSBuilds were not cleaned when %s was removed", mosc)
logger.Infof("Validating that machine-os-builder pod was garbage collected")
mOSBuilder := NewNamespacedResource(mosc.GetOC().AsAdmin(), "deployment", MachineConfigNamespace, "machine-os-builder")
o.Eventually(mOSBuilder, "2m", "30s").ShouldNot(Exist(),
"The machine-os-builder deployment was not removed when the infra pool was unlabeled")
logger.Infof("Validating that configmaps were garbage collected")
for _, cm := range NewConfigMapList(mosc.GetOC(), MachineConfigNamespace).GetAllOrFail() {
o.Expect(cm.GetName()).NotTo(o.ContainSubstring("rendered-"+mcp.GetName()),
"%s should have been garbage collected by OCB when the %s was deleted", cm, mosc)
}
logger.Infof("OK!")
exutil.By("Verify the etc-pki-entitlement secret is removed")
oc := mosc.GetOC()
secretName := fmt.Sprintf("etc-pki-entitlement-%s", mcp.GetName())
entitlementSecretInMco := NewSecret(oc.AsAdmin(), "openshift-machine-config-operator", secretName)
o.Eventually(entitlementSecretInMco.Exists, "5m", "30s").Should(o.BeFalse(), "Error etc-pki-entitlement should not exist")
logger.Infof("OK!\n")
}
// ValidateSuccessfulMOSC checks that the provided MOSC is successfully applied
func ValidateSuccessfulMOSC(mosc *MachineOSConfig, checkers []Checker) {
mcp, err := mosc.GetMachineConfigPool()
o.Expect(err).NotTo(o.HaveOccurred(), "Cannot get the MCP for %s", mosc)
exutil.By("Check that the deployment machine-os-builder is created")
mOSBuilder := NewNamespacedResource(mosc.GetOC(), "deployment", MachineConfigNamespace, "machine-os-builder")
o.Eventually(mOSBuilder, "5m", "30s").Should(Exist(),
"The machine-os-builder deployment was not created when the OCB functionality was enabled in the infra pool")
o.Expect(mOSBuilder.Get(`{.spec.template.spec.containers[?(@.name=="machine-os-builder")].command}`)).To(o.ContainSubstring("machine-os-builder"),
"Error the machine-os-builder is not invoking the machine-os-builder binary")
o.Eventually(mOSBuilder.Get, "3m", "30s").WithArguments(`{.spec.replicas}`).Should(o.Equal("1"),
"The machine-os-builder deployment was created but the configured number of replicas is not the expected one")
o.Eventually(mOSBuilder.Get, "2m", "30s").WithArguments(`{.status.availableReplicas}`).Should(o.Equal("1"),
"The machine-os-builder deployment was created but the available number of replicas is not the expected one")
exutil.AssertAllPodsToBeReady(mosc.GetOC(), MachineConfigNamespace)
logger.Infof("OK!\n")
exutil.By("Check that the machine-os-builder is using leader election without failing")
o.Expect(mOSBuilder.Logs()).To(o.And(
o.ContainSubstring("attempting to acquire leader lease openshift-machine-config-operator/machine-os-builder"),
o.ContainSubstring("successfully acquired lease openshift-machine-config-operator/machine-os-builder")),
"The machine os builder pod is not using the leader election without failures")
logger.Infof("OK!\n")
exutil.By("Check that a new build has been triggered")
o.Eventually(mosc.GetCurrentMachineOSBuild, "5m", "20s").Should(Exist(),
"No build was created when OCB was enabled")
mosb, err := mosc.GetCurrentMachineOSBuild()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting MOSB from MOSC")
o.Eventually(mosb.GetJob, "2m", "20s").Should(Exist(),
"No build job was created when OCB was enabled")
o.Eventually(mosb, "5m", "20s").Should(HaveConditionField("Building", "status", TrueString),
"MachineOSBuild didn't report that the build has begun")
logger.Infof("OK!\n")
exutil.By("Check that a new build is successfully executed")
o.Eventually(mosb, "20m", "20s").Should(HaveConditionField("Building", "status", FalseString), "Build was not finished")
o.Eventually(mosb, "10m", "20s").Should(HaveConditionField("Succeeded", "status", TrueString), "Build didn't succeed")
o.Eventually(mosb, "2m", "20s").Should(HaveConditionField("Interrupted", "status", FalseString), "Build was interrupted")
o.Eventually(mosb, "2m", "20s").Should(HaveConditionField("Failed", "status", FalseString), "Build was failed")
logger.Infof("Check that the build job was deleted")
o.Eventually(mosb.GetJob, "2m", "20s").ShouldNot(Exist(), "Build job was not cleaned")
logger.Infof("OK!\n")
numNodes, err := mcp.getMachineCount()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting MachineCount from %s", mcp)
if numNodes > 0 {
exutil.By("Wait for the new image to be applied")
mcp.waitForComplete()
logger.Infof("OK!\n")
node := mcp.GetSortedNodesOrFail()[0]
exutil.By("Check that the right image is deployed in the nodes")
currentImagePullSpec, err := mosc.GetStatusCurrentImagePullSpec()
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the current image pull spec in %s", mosc)
o.Expect(node.GetCurrentBootOSImage()).To(o.Equal(currentImagePullSpec),
"The image installed in node %s is not the expected one", mosc)
logger.Infof("OK!\n")
for _, checker := range checkers {
checker.Check(node)
}
} else {
logger.Infof("There is no node configured in %s. We don't wait for the configuration to be applied", mcp)
}
}
// DisableOCL disables OCL by removing the MachineOSConfig resource and verifying that the nodes stop using the OCL image.
func DisableOCL(mosc *MachineOSConfig) error {
if !mosc.Exists() {
logger.Infof("%s does not exist. No need to remove/disable it", mosc)
return nil
}
mcp, err := mosc.GetMachineConfigPool()
if err != nil {
return err
}
currentOSImageSpec, err := mosc.GetStatusCurrentImagePullSpec()
if err != nil {
return err
}
err = mosc.CleanupAndDelete()
if err != nil {
return err
}
nodes, err := mcp.GetCoreOsNodes()
if err != nil {
return err
}
if len(nodes) > 0 {
node := nodes[0]
mcp.waitForComplete()
o.Expect(node.GetCurrentBootOSImage()).NotTo(o.Equal(currentOSImageSpec),
"OCL was disabled in %s but the OCL image is still used in %s", node)
} else {
logger.Infof("There is no coreos node configured in %s. We don't wait for the configuration to be applied and we don't execute any verification on the nodes", mcp)
}
return nil
}
// checkNewBuildIsTriggered executes the necessary validations to make sure that a new build was triggered and succeeded. Fails the test case if validations fail
func checkNewBuildIsTriggered(mosc *MachineOSConfig, currentMOSB *MachineOSBuild) {
var (
newMOSB *MachineOSBuild
err error
)
logger.Infof("Current mosb: %s", currentMOSB)
o.Eventually(func() (string, error) {
newMOSB, err = mosc.GetCurrentMachineOSBuild()
return newMOSB.GetName(), err
}, "5m", "20s").ShouldNot(o.Equal(currentMOSB.GetName()),
"A new MOSB should be created after the new rendered image pull spec is configured")
logger.Infof("New mosb: %s", newMOSB)
o.Eventually(newMOSB, "5m", "20s").Should(HaveConditionField("Building", "status", TrueString),
"MachineOSBuild didn't report that the build has begun")
o.Eventually(newMOSB, "20m", "20s").Should(HaveConditionField("Building", "status", FalseString), "Build was not finished")
o.Eventually(newMOSB, "10m", "20s").Should(HaveConditionField("Succeeded", "status", TrueString), "Build didn't succeed")
o.Eventually(newMOSB, "2m", "20s").Should(HaveConditionField("Interrupted", "status", FalseString), "Build was interrupted")
o.Eventually(newMOSB, "2m", "20s").Should(HaveConditionField("Failed", "status", FalseString), "Build was failed")
}
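// verifyEntitlementSecretIsPresent checks that, when the etc-pki-entitlement secret exists in the
// openshift-config-managed namespace, the corresponding per-pool secret is created in the MCO namespace.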
func verifyEntitlementSecretIsPresent(oc *exutil.CLI, mcp *MachineConfigPool) {
entitlementSecret := NewSecret(oc.AsAdmin(), "openshift-config-managed", "etc-pki-entitlement")
secretName := fmt.Sprintf("etc-pki-entitlement-%s", mcp.GetName())
entitlementSecretInMco := NewSecret(oc.AsAdmin(), "openshift-machine-config-operator", secretName)
exutil.By("Verify the etc-pki-entitlement secret is present in openshift-config-managed namespace ")
if entitlementSecret.Exists() {
exutil.By("Verify the etc-pki-entitlement secret is present")
logger.Infof("%s\n", entitlementSecretInMco)
o.Eventually(entitlementSecretInMco.Exists, "5m", "30s").Should(o.BeTrue(), "Error etc-pki-entitlement should exist")
logger.Infof("OK!\n")
} else {
logger.Infof("etc-pki-entitlement does not exist in openshift-config-managed namespace")
}
}
// RebuildImageAndCheck rebuilds the latest image of the MachineOSConfig resource and checks that it is properly built and applied
func RebuildImageAndCheck(mosc *MachineOSConfig) {
exutil.By("Rebuild the current image")
var (
mcp = exutil.OrFail[*MachineConfigPool](mosc.GetMachineConfigPool())
mosb = exutil.OrFail[*MachineOSBuild](mosc.GetCurrentMachineOSBuild())
currentImagePullSpec = exutil.OrFail[string](mosc.GetStatusCurrentImagePullSpec())
)
o.Expect(mosc.Rebuild()).To(o.Succeed(),
"Error patching %s to rebuild the current image", mosc)
logger.Infof("OK!\n")
exutil.By("Check that the existing MOSB is reused and it builds a new image")
o.Eventually(mosb.GetJob, "2m", "20s").Should(Exist(), "Rebuild job was not created")
o.Eventually(mosb, "20m", "20s").Should(HaveConditionField("Building", "status", FalseString), "Rebuild was not finished")
o.Eventually(mosb, "10m", "20s").Should(HaveConditionField("Succeeded", "status", TrueString), "Rebuild didn't succeed")
o.Eventually(mosb, "2m", "20s").Should(HaveConditionField("Interrupted", "status", FalseString), "Reuild was interrupted")
o.Eventually(mosb, "2m", "20s").Should(HaveConditionField("Failed", "status", FalseString), "Reuild was failed")
logger.Infof("Check that the rebuild job was deleted")
o.Eventually(mosb.GetJob, "2m", "20s").ShouldNot(Exist(), "Build job was not cleaned")
logger.Infof("OK!\n")
exutil.By("Wait for the new image to be applied")
nodes, err := mcp.GetCoreOsNodes()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting coreos nodes from %s", mcp)
if len(nodes) > 0 {
node := nodes[0]
mcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Check that the new image is the one used in the nodes")
newImagePullSpec := exutil.OrFail[string](mosc.GetStatusCurrentImagePullSpec())
o.Expect(newImagePullSpec).NotTo(o.Equal(currentImagePullSpec),
"The new image after the rebuild operation should be different fron the initial image")
o.Expect(node.GetCurrentBootOSImage()).To(o.Equal(newImagePullSpec),
"The new image is not being used in node %s", node)
logger.Infof("OK!\n")
} else {
logger.Infof("There is no coreos node configured in %s. We don't wait for the configuration to be applied and we don't execute any verification on the nodes", mcp)
logger.Infof("OK!\n")
}
}
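// Illustrative usage sketch (not part of the original test suite): a minimal OCB test body that wires the helpers
// above together. It assumes the same fixtures used by the Describe block in this file (oc, o, g) and uses an
// arbitrary example MOSC name.
//
//	g.It("Example: enable and disable OCL on a pool [Disruptive]", func() {
//		mcp := GetCompactCompatiblePool(oc.AsAdmin())
//		mosc, err := CreateMachineOSConfigUsingExternalOrInternalRegistry(oc.AsAdmin(), MachineConfigNamespace, "example-mosc", mcp.GetName(), nil)
//		defer DisableOCL(mosc)
//		o.Expect(err).NotTo(o.HaveOccurred(), "Error creating the MachineOSConfig resource")
//		ValidateSuccessfulMOSC(mosc, nil)
//		o.Expect(DisableOCL(mosc)).To(o.Succeed(), "Error cleaning up %s", mosc)
//		ValidateMOSCIsGarbageCollected(mosc, mcp)
//	})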
|
package mco
| ||||
function
|
openshift/openshift-tests-private
|
8d1359f6-cc38-4172-8fa4-23b2ca30003d
|
testContainerFile
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_ocb.go
|
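// testContainerFile enables OCB for the given MCP using the provided container files, checks that the resulting image is
// built and applied in the nodes (running the provided checkers), and finally disables OCB and verifies that all OCB
// resources are garbage collected. When imageNamespace is not the MCO namespace, the image is pushed to the internal
// registry using that namespace.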
func testContainerFile(containerFiles []ContainerFile, imageNamespace string, mcp *MachineConfigPool, checkers []Checker, defaultPullSecret bool) {
var (
oc = mcp.GetOC().AsAdmin()
mcpList = NewMachineConfigPoolList(oc.AsAdmin())
moscName = "test-" + GetCurrentTestPolarionIDNumber()
mosc *MachineOSConfig
err error
)
switch imageNamespace {
case MachineConfigNamespace:
exutil.By("Configure OCB functionality for the new infra MCP. Create MOSC")
mosc, err = createMachineOSConfigUsingExternalOrInternalRegistry(oc, MachineConfigNamespace, moscName, mcp.GetName(), containerFiles, defaultPullSecret)
default:
SkipTestIfCannotUseInternalRegistry(mcp.GetOC())
exutil.By("Capture the current pull-secret value")
// We don't use the pullSecret resource directly, instead we use auxiliary functions that will
// extract and restore the secret's values using a file. That way we can recover the value of the pull-secret
// if our execution goes wrong, without printing it in the logs (for security reasons).
secretFile, sErr := getPullSecret(oc)
o.Expect(sErr).NotTo(o.HaveOccurred(), "Error getting the pull-secret")
logger.Debugf("Pull-secret content stored in file %s", secretFile)
defer func() {
logger.Infof("Restoring initial pull-secret value")
output, sErr := setDataForPullSecret(oc, secretFile)
if sErr != nil {
logger.Errorf("Error restoring the pull-secret's value. Error: %s\nOutput: %s", err, output)
}
mcpList.waitForComplete()
}()
logger.Infof("OK!\n")
exutil.By("Create namespace to store the osImage")
tmpNamespace := NewResource(oc.AsAdmin(), "ns", imageNamespace)
if !tmpNamespace.Exists() {
defer tmpNamespace.Delete()
o.Expect(oc.AsAdmin().WithoutNamespace().Run("new-project").Args(tmpNamespace.GetName(), "--skip-config-write").Execute()).To(o.Succeed(), "Error creating a new project to store the OCL images")
}
logger.Infof("OK!\n")
exutil.By("Configure OCB functionality for the new infra MCP. Create MOSC")
mosc, err = CreateMachineOSConfigUsingInternalRegistry(oc, tmpNamespace.GetName(), moscName, mcp.GetName(), containerFiles, defaultPullSecret)
}
defer DisableOCL(mosc)
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating the MachineOSConfig resource")
logger.Infof("OK!\n")
verifyEntitlementSecretIsPresent(oc.AsAdmin(), mcp)
ValidateSuccessfulMOSC(mosc, checkers)
exutil.By("Remove the MachineOSConfig resource")
o.Expect(DisableOCL(mosc)).To(o.Succeed(), "Error cleaning up %s", mosc)
logger.Infof("OK!\n")
ValidateMOSCIsGarbageCollected(mosc, mcp)
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
d91ea062-2829-4f59-a3c6-50f4c1858ca2
|
skipTestIfOCBIsEnabled
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_ocb.go
|
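// skipTestIfOCBIsEnabled skips the test case if any MachineOSConfig resource already exists in the cluster,
// since these tests require OCB to be disabled in all pools.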
func skipTestIfOCBIsEnabled(oc *exutil.CLI) {
moscl := NewMachineOSConfigList(oc)
allMosc := moscl.GetAllOrFail()
if len(allMosc) != 0 {
moscl.PrintDebugCommand()
g.Skip(fmt.Sprintf("To run this test case we need that OCB is not enabled in any pool. At least %s OBC is enabled in this cluster.", allMosc[0]))
}
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
51866eea-f4ad-49ea-8933-53ab0110a52f
|
checkMisconfiguredMOSC
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_ocb.go
|
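// checkMisconfiguredMOSC creates a misconfigured MachineOSConfig, verifies that the machine-config ClusterOperator becomes
// degraded with the expected message, and checks that the degraded condition is cleared once the offending MOSC is deleted.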
func checkMisconfiguredMOSC(oc *exutil.CLI, moscName, poolName, baseImagePullSecret, renderedImagePushSecret, pushSpec string, containerFile []ContainerFile,
expectedMsg, stepMsg string) {
var (
machineConfigCO = NewResource(oc.AsAdmin(), "co", "machine-config")
)
exutil.By(stepMsg)
defer logger.Infof("OK!\n")
logger.Infof("Create a misconfiugred MOSC")
mosc, err := CreateMachineOSConfig(oc, moscName, poolName, baseImagePullSecret, renderedImagePushSecret, pushSpec, containerFile)
defer mosc.Delete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating MOSC with wrong pull secret")
logger.Infof("OK!")
logger.Infof("Expect machine-config CO to be degraded")
o.Eventually(machineConfigCO, "5m", "20s").Should(BeDegraded(),
"%s should be degraded when a MOSC is configured with a wrong pull secret", machineConfigCO)
o.Eventually(machineConfigCO, "1m", "20s").Should(HaveConditionField("Degraded", "message", o.ContainSubstring(expectedMsg)),
"%s should be degraded when a MOSC is configured with a wrong pull secret", machineConfigCO)
logger.Infof("OK!")
logger.Infof("Delete the offending MOSC")
o.Expect(mosc.Delete()).To(o.Succeed(), "Error deleing the offendint MOSC %s", mosc)
logger.Infof("OK!")
logger.Infof("CHeck that machine-config CO is not degraded anymore")
o.Eventually(machineConfigCO, "5m", "20s").ShouldNot(BeDegraded(),
"%s should stop being degraded when the offending MOSC is deleted", machineConfigCO)
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
6c28b0c8-e8cc-47c4-9972-9ad75b2a0242
|
ValidateMOSCIsGarbageCollected
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_ocb.go
|
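// ValidateMOSCIsGarbageCollected makes sure that all resources related to the provided MOSC have been removed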
func ValidateMOSCIsGarbageCollected(mosc *MachineOSConfig, mcp *MachineConfigPool) {
exutil.By("Check that the OCB resources are cleaned up")
logger.Infof("Validating that MOSB resources were garbage collected")
NewMachineOSBuildList(mosc.GetOC()).PrintDebugCommand() // for debugging purposes
o.Eventually(mosc.GetMachineOSBuildList, "2m", "20s").Should(o.HaveLen(0), "MachineOSBuilds were not cleaned when %s was removed", mosc)
logger.Infof("Validating that machine-os-builder pod was garbage collected")
mOSBuilder := NewNamespacedResource(mosc.GetOC().AsAdmin(), "deployment", MachineConfigNamespace, "machine-os-builder")
o.Eventually(mOSBuilder, "2m", "30s").ShouldNot(Exist(),
"The machine-os-builder deployment was not removed when the infra pool was unlabeled")
logger.Infof("Validating that configmaps were garbage collected")
for _, cm := range NewConfigMapList(mosc.GetOC(), MachineConfigNamespace).GetAllOrFail() {
o.Expect(cm.GetName()).NotTo(o.ContainSubstring("rendered-"+mcp.GetName()),
"%s should have been garbage collected by OCB when the %s was deleted", cm, mosc)
}
logger.Infof("OK!")
exutil.By("Verify the etc-pki-entitlement secret is removed")
oc := mosc.GetOC()
secretName := fmt.Sprintf("etc-pki-entitlement-%s", mcp.GetName())
entitlementSecretInMco := NewSecret(oc.AsAdmin(), "openshift-machine-config-operator", secretName)
o.Eventually(entitlementSecretInMco.Exists, "5m", "30s").Should(o.BeFalse(), "Error etc-pki-entitlement should not exist")
logger.Infof("OK!\n")
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
58825ab2-a0d4-4131-a516-4f9b8a7c93bb
|
ValidateSuccessfulMOSC
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_ocb.go
|
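// ValidateSuccessfulMOSC checks that the provided MOSC is successfully applied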
func ValidateSuccessfulMOSC(mosc *MachineOSConfig, checkers []Checker) {
mcp, err := mosc.GetMachineConfigPool()
o.Expect(err).NotTo(o.HaveOccurred(), "Cannot get the MCP for %s", mosc)
exutil.By("Check that the deployment machine-os-builder is created")
mOSBuilder := NewNamespacedResource(mosc.GetOC(), "deployment", MachineConfigNamespace, "machine-os-builder")
o.Eventually(mOSBuilder, "5m", "30s").Should(Exist(),
"The machine-os-builder deployment was not created when the OCB functionality was enabled in the infra pool")
o.Expect(mOSBuilder.Get(`{.spec.template.spec.containers[?(@.name=="machine-os-builder")].command}`)).To(o.ContainSubstring("machine-os-builder"),
"Error the machine-os-builder is not invoking the machine-os-builder binary")
o.Eventually(mOSBuilder.Get, "3m", "30s").WithArguments(`{.spec.replicas}`).Should(o.Equal("1"),
"The machine-os-builder deployment was created but the configured number of replicas is not the expected one")
o.Eventually(mOSBuilder.Get, "2m", "30s").WithArguments(`{.status.availableReplicas}`).Should(o.Equal("1"),
"The machine-os-builder deployment was created but the available number of replicas is not the expected one")
exutil.AssertAllPodsToBeReady(mosc.GetOC(), MachineConfigNamespace)
logger.Infof("OK!\n")
exutil.By("Check that the machine-os-builder is using leader election without failing")
o.Expect(mOSBuilder.Logs()).To(o.And(
o.ContainSubstring("attempting to acquire leader lease openshift-machine-config-operator/machine-os-builder"),
o.ContainSubstring("successfully acquired lease openshift-machine-config-operator/machine-os-builder")),
"The machine os builder pod is not using the leader election without failures")
logger.Infof("OK!\n")
exutil.By("Check that a new build has been triggered")
o.Eventually(mosc.GetCurrentMachineOSBuild, "5m", "20s").Should(Exist(),
"No build was created when OCB was enabled")
mosb, err := mosc.GetCurrentMachineOSBuild()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting MOSB from MOSC")
o.Eventually(mosb.GetJob, "2m", "20s").Should(Exist(),
"No build job was created when OCB was enabled")
o.Eventually(mosb, "5m", "20s").Should(HaveConditionField("Building", "status", TrueString),
"MachineOSBuild didn't report that the build has begun")
logger.Infof("OK!\n")
exutil.By("Check that a new build is successfully executed")
o.Eventually(mosb, "20m", "20s").Should(HaveConditionField("Building", "status", FalseString), "Build was not finished")
o.Eventually(mosb, "10m", "20s").Should(HaveConditionField("Succeeded", "status", TrueString), "Build didn't succeed")
o.Eventually(mosb, "2m", "20s").Should(HaveConditionField("Interrupted", "status", FalseString), "Build was interrupted")
o.Eventually(mosb, "2m", "20s").Should(HaveConditionField("Failed", "status", FalseString), "Build was failed")
logger.Infof("Check that the build job was deleted")
o.Eventually(mosb.GetJob, "2m", "20s").ShouldNot(Exist(), "Build job was not cleaned")
logger.Infof("OK!\n")
numNodes, err := mcp.getMachineCount()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting MachineCount from %s", mcp)
if numNodes > 0 {
exutil.By("Wait for the new image to be applied")
mcp.waitForComplete()
logger.Infof("OK!\n")
node := mcp.GetSortedNodesOrFail()[0]
exutil.By("Check that the right image is deployed in the nodes")
currentImagePullSpec, err := mosc.GetStatusCurrentImagePullSpec()
o.Expect(err).NotTo(o.HaveOccurred(),
"Error getting the current image pull spec in %s", mosc)
o.Expect(node.GetCurrentBootOSImage()).To(o.Equal(currentImagePullSpec),
"The image installed in node %s is not the expected one", mosc)
logger.Infof("OK!\n")
for _, checker := range checkers {
checker.Check(node)
}
} else {
logger.Infof("There is no node configured in %s. We don't wait for the configuration to be applied", mcp)
}
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
0f37135c-93f6-4dd9-a7f5-61aeb0dc7b99
|
DisableOCL
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_ocb.go
|
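// DisableOCL removes the given MachineOSConfig (if it exists) and, when the pool contains CoreOS nodes, waits for
// the pool to settle and verifies that the first node is no longer booted from the OCL-built image.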
func DisableOCL(mosc *MachineOSConfig) error {
if !mosc.Exists() {
logger.Infof("%s does not exist. No need to remove/disable it", mosc)
return nil
}
mcp, err := mosc.GetMachineConfigPool()
if err != nil {
return err
}
currentOSImageSpec, err := mosc.GetStatusCurrentImagePullSpec()
if err != nil {
return err
}
err = mosc.CleanupAndDelete()
if err != nil {
return err
}
nodes, err := mcp.GetCoreOsNodes()
if err != nil {
return err
}
if len(nodes) > 0 {
node := nodes[0]
mcp.waitForComplete()
o.Expect(node.GetCurrentBootOSImage()).NotTo(o.Equal(currentOSImageSpec),
"OCL was disabled in %s but the OCL image is still used in %s", node)
} else {
logger.Infof("There is no coreos node configured in %s. We don't wait for the configuration to be applied and we don't execute any verification on the nodes", mcp)
}
return nil
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
b88f4dab-22ec-4bc0-99e3-ac65b21cb36d
|
checkNewBuildIsTriggered
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_ocb.go
|
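// checkNewBuildIsTriggered waits for a MachineOSBuild different from currentMOSB to be created for the given
// MachineOSConfig and asserts that the new build starts, finishes and succeeds without being interrupted.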
func checkNewBuildIsTriggered(mosc *MachineOSConfig, currentMOSB *MachineOSBuild) {
var (
newMOSB *MachineOSBuild
err error
)
logger.Infof("Current mosb: %s", currentMOSB)
o.Eventually(func() (string, error) {
newMOSB, err = mosc.GetCurrentMachineOSBuild()
return newMOSB.GetName(), err
}, "5m", "20s").ShouldNot(o.Equal(currentMOSB.GetName()),
"A new MOSB should be created after the new rendered image pull spec is configured")
logger.Infof("New mosb: %s", newMOSB)
o.Eventually(newMOSB, "5m", "20s").Should(HaveConditionField("Building", "status", TrueString),
"MachineOSBuild didn't report that the build has begun")
o.Eventually(newMOSB, "20m", "20s").Should(HaveConditionField("Building", "status", FalseString), "Build was not finished")
o.Eventually(newMOSB, "10m", "20s").Should(HaveConditionField("Succeeded", "status", TrueString), "Build didn't succeed")
o.Eventually(newMOSB, "2m", "20s").Should(HaveConditionField("Interrupted", "status", FalseString), "Build was interrupted")
o.Eventually(newMOSB, "2m", "20s").Should(HaveConditionField("Failed", "status", FalseString), "Build was failed")
}
|
mco
| |||||
function
|
openshift/openshift-tests-private
|
9b3f785a-125e-4986-bf3e-2472022a722a
|
verifyEntitlementSecretIsPresent
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_ocb.go
|
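// verifyEntitlementSecretIsPresent checks that, when the cluster provides an etc-pki-entitlement secret in the
// openshift-config-managed namespace, the per-pool copy is eventually created in the MCO namespace.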
func verifyEntitlementSecretIsPresent(oc *exutil.CLI, mcp *MachineConfigPool) {
entitlementSecret := NewSecret(oc.AsAdmin(), "openshift-config-managed", "etc-pki-entitlement")
secretName := fmt.Sprintf("etc-pki-entitlement-%s", mcp.GetName())
entitlementSecretInMco := NewSecret(oc.AsAdmin(), "openshift-machine-config-operator", secretName)
exutil.By("Verify the etc-pki-entitlement secret is present in openshift-config-managed namespace ")
if entitlementSecret.Exists() {
exutil.By("Verify the etc-pki-entitlement secret is present")
logger.Infof("%s\n", entitlementSecretInMco)
o.Eventually(entitlementSecretInMco.Exists, "5m", "30s").Should(o.BeTrue(), "Error etc-pki-entitlement should exist")
logger.Infof("OK!\n")
} else {
logger.Infof("etc-pki-entitlement does not exist in openshift-config-managed namespace")
}
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
91b1d180-db59-4b2c-97df-1d5985e978b2
|
RebuildImageAndCheck
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_ocb.go
|
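// RebuildImageAndCheck triggers a rebuild of the current OCL image, verifies that the existing MachineOSBuild is
// reused and succeeds, and checks that the newly built image (different from the previous one) is applied to the
// nodes. Typical usage in these tests: ValidateSuccessfulMOSC(mosc, nil) followed by RebuildImageAndCheck(mosc)
// and a final DisableOCL(mosc).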
func RebuildImageAndCheck(mosc *MachineOSConfig) {
exutil.By("Rebuild the current image")
var (
mcp = exutil.OrFail[*MachineConfigPool](mosc.GetMachineConfigPool())
mosb = exutil.OrFail[*MachineOSBuild](mosc.GetCurrentMachineOSBuild())
currentImagePullSpec = exutil.OrFail[string](mosc.GetStatusCurrentImagePullSpec())
)
o.Expect(mosc.Rebuild()).To(o.Succeed(),
"Error patching %s to rebuild the current image", mosc)
logger.Infof("OK!\n")
exutil.By("Check that the existing MOSB is reused and it builds a new image")
o.Eventually(mosb.GetJob, "2m", "20s").Should(Exist(), "Rebuild job was not created")
o.Eventually(mosb, "20m", "20s").Should(HaveConditionField("Building", "status", FalseString), "Rebuild was not finished")
o.Eventually(mosb, "10m", "20s").Should(HaveConditionField("Succeeded", "status", TrueString), "Rebuild didn't succeed")
o.Eventually(mosb, "2m", "20s").Should(HaveConditionField("Interrupted", "status", FalseString), "Reuild was interrupted")
o.Eventually(mosb, "2m", "20s").Should(HaveConditionField("Failed", "status", FalseString), "Reuild was failed")
logger.Infof("Check that the rebuild job was deleted")
o.Eventually(mosb.GetJob, "2m", "20s").ShouldNot(Exist(), "Build job was not cleaned")
logger.Infof("OK!\n")
exutil.By("Wait for the new image to be applied")
nodes, err := mcp.GetCoreOsNodes()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting coreos nodes from %s", mcp)
if len(nodes) > 0 {
node := nodes[0]
mcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Check that the new image is the one used in the nodes")
newImagePullSpec := exutil.OrFail[string](mosc.GetStatusCurrentImagePullSpec())
o.Expect(newImagePullSpec).NotTo(o.Equal(currentImagePullSpec),
"The new image after the rebuild operation should be different fron the initial image")
o.Expect(node.GetCurrentBootOSImage()).To(o.Equal(newImagePullSpec),
"The new image is not being used in node %s", node)
logger.Infof("OK!\n")
} else {
logger.Infof("There is no coreos node configured in %s. We don't wait for the configuration to be applied and we don't execute any verification on the nodes", mcp)
logger.Infof("OK!\n")
}
}
|
mco
| |||||
test case
|
openshift/openshift-tests-private
|
5d6b316e-97aa-47de-a5a7-4a184a40a190
|
Author:sregidor-NonPreRelease-Medium-79172-OCB Inherit from global pull secret if baseImagePullSecret field is not specified [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_ocb.go
|
g.It("Author:sregidor-NonPreRelease-Medium-79172-OCB Inherit from global pull secret if baseImagePullSecret field is not specified [Disruptive]", func() {
var (
infraMcpName = "infra"
)
exutil.By("Create custom infra MCP")
// We add no workers to the infra pool; they are not necessary
infraMcp, err := CreateCustomMCP(oc.AsAdmin(), infraMcpName, 0)
defer infraMcp.delete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating a new custom pool: %s", infraMcpName)
logger.Infof("OK!\n")
testContainerFile([]ContainerFile{}, MachineConfigNamespace, infraMcp, nil, true)
})
| ||||||
test case
|
openshift/openshift-tests-private
|
4eeb9987-8e67-44e6-92f5-17fc79b64b6a
|
Author:sregidor-NonPreRelease-High-73494-[P1] OCB Wiring up Productionalized Build Controller. New 4.16 OCB API [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_ocb.go
|
g.It("Author:sregidor-NonPreRelease-High-73494-[P1] OCB Wiring up Productionalized Build Controller. New 4.16 OCB API [Disruptive]", func() {
var (
infraMcpName = "infra"
moscName = "tc-73494-infra"
)
exutil.By("Create custom infra MCP")
// We add no workers to the infra pool; they are not necessary
infraMcp, err := CreateCustomMCP(oc.AsAdmin(), infraMcpName, 0)
defer infraMcp.delete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating a new custom pool: %s", infraMcpName)
logger.Infof("OK!\n")
exutil.By("Configure OCB functionality for the new infra MCP")
mosc, err := CreateMachineOSConfigUsingExternalOrInternalRegistry(oc.AsAdmin(), MachineConfigNamespace, moscName, infraMcpName, nil)
defer mosc.CleanupAndDelete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating the MachineOSConfig resource")
logger.Infof("OK!\n")
ValidateSuccessfulMOSC(mosc, nil)
exutil.By("Remove the MachineOSConfig resource")
o.Expect(mosc.CleanupAndDelete()).To(o.Succeed(), "Error cleaning up %s", mosc)
logger.Infof("OK!\n")
ValidateMOSCIsGarbageCollected(mosc, infraMcp)
exutil.AssertAllPodsToBeReady(oc.AsAdmin(), MachineConfigNamespace)
logger.Infof("OK!\n")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
a24cde37-755b-4293-8ad2-bda79123c67d
|
Author:sregidor-NonPreRelease-Medium-73599-[P2] OCB Validate MachineOSConfig. New 4.16 OCB API [Disruptive]
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_ocb.go
|
g.It("Author:sregidor-NonPreRelease-Medium-73599-[P2] OCB Validate MachineOSConfig. New 41.6 OCB API [Disruptive]", func() {
var (
infraMcpName = "infra"
moscName = "tc-73599-infra"
pushSpec = fmt.Sprintf("%s/openshift-machine-config-operator/ocb-%s-image:latest", InternalRegistrySvcURL, infraMcpName)
pullSecret = NewSecret(oc.AsAdmin(), "openshift-config", "pull-secret")
fakePullSecretName = "fake-pull-secret"
expectedWrongPullSecretMsg = fmt.Sprintf(`could not validate baseImagePullSecret "%s" for MachineOSConfig %s: secret %s from %s is not found. Did you use the right secret name?`,
fakePullSecretName, moscName, fakePullSecretName, moscName)
fakePushSecretName = "fake-push-secret"
expectedWrongPushSecretMsg = fmt.Sprintf(`could not validate renderedImagePushSecret "%s" for MachineOSConfig %s: secret %s from %s is not found. Did you use the right secret name?`,
fakePushSecretName, moscName, fakePushSecretName, moscName)
fakeBuilderType = "FakeBuilderType"
expectedWrongBuilderTypeMsg = fmt.Sprintf(`Unsupported value: "%s": supported values: "Job"`, fakeBuilderType)
)
exutil.By("Create custom infra MCP")
// We add no workers to the infra pool; they are not necessary
infraMcp, err := CreateCustomMCP(oc.AsAdmin(), infraMcpName, 0)
defer infraMcp.delete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating a new custom pool: %s", infraMcpName)
logger.Infof("OK!\n")
exutil.By("Clone the pull-secret in MCO namespace")
clonedSecret, err := CloneResource(pullSecret, "cloned-pull-secret-"+exutil.GetRandomString(), MachineConfigNamespace, nil)
defer clonedSecret.Delete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error duplicating the cluster's pull-secret in MCO namespace")
logger.Infof("OK!\n")
// Check behaviour when wrong pullSecret
checkMisconfiguredMOSC(oc.AsAdmin(), moscName, infraMcpName, fakePullSecretName, clonedSecret.GetName(), pushSpec, nil,
expectedWrongPullSecretMsg,
"Check that MOSC using wrong pull secret are failing as expected")
// Check behaviour when wrong pushSecret
checkMisconfiguredMOSC(oc.AsAdmin(), moscName, infraMcpName, clonedSecret.GetName(), fakePushSecretName, pushSpec, nil,
expectedWrongPushSecretMsg,
"Check that MOSC using wrong push secret are failing as expected")
// Try to create a MOSC with a wrong image builder type
logger.Infof("Create a MachineOSConfig resource with a wrong builder type")
err = NewMCOTemplate(oc, "generic-machine-os-config.yaml").Create("-p", "NAME="+moscName, "POOL="+infraMcpName, "PULLSECRET="+clonedSecret.GetName(),
"PUSHSECRET="+clonedSecret.GetName(), "PUSHSPEC="+pushSpec, "IMAGEBUILDERTYPE="+fakeBuilderType)
o.Expect(err).To(o.HaveOccurred(), "Expected oc command to fail, but it didn't")
o.Expect(err).To(o.BeAssignableToTypeOf(&exutil.ExitError{}), "Unexpected error while creating the new MOSC")
o.Expect(err.(*exutil.ExitError).StdErr).To(o.ContainSubstring(expectedWrongBuilderTypeMsg),
"MSOC creation using wrong image type builder should be forbidden")
logger.Infof("OK!")
})
| |||||
test case
|
openshift/openshift-tests-private
|
f5ce2d16-217f-48cd-b7dc-98a9e6c35193
|
Author:ptalgulk-ConnectedOnly-Longduration-NonPreRelease-Critical-74645-Panic Condition for Non-Matching MOSC Resources [Disruptive]
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_ocb.go
|
g.It("Author:ptalgulk-ConnectedOnly-Longduration-NonPreRelease-Critical-74645-Panic Condition for Non-Matching MOSC Resources [Disruptive]", func() {
var (
infraMcpName = "infra"
moscName = "tc-74645"
mcc = NewController(oc.AsAdmin())
)
exutil.By("Create New Custom MCP")
defer DeleteCustomMCP(oc.AsAdmin(), infraMcpName)
infraMcp, err := CreateCustomMCP(oc.AsAdmin(), infraMcpName, 1)
o.Expect(err).NotTo(o.HaveOccurred(), "Could not create a new custom MCP")
node := infraMcp.GetNodesOrFail()[0]
logger.Infof("%s", node.GetName())
logger.Infof("OK!\n")
exutil.By("Configure OCB functionality for the new infra MCP")
mosc, err := CreateMachineOSConfigUsingExternalOrInternalRegistry(oc.AsAdmin(), MachineConfigNamespace, moscName, infraMcpName, nil)
defer DisableOCL(mosc)
// remove after this bug is fixed OCPBUGS-36810
defer func() {
logger.Infof("Configmaps should also be deleted ")
cmList := NewConfigMapList(mosc.GetOC(), MachineConfigNamespace).GetAllOrFail()
for _, cm := range cmList {
if strings.Contains(cm.GetName(), "rendered-") {
o.Expect(cm.Delete()).Should(o.Succeed(), "The ConfigMap related to MOSC has not been removed")
}
}
}()
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating the MachineOSConfig resource")
logger.Infof("OK!\n")
exutil.By("Check that a new build has been triggered")
o.Eventually(mosc.GetCurrentMachineOSBuild, "5m", "20s").Should(Exist(),
"No build was created when OCB was enabled")
mosb, err := mosc.GetCurrentMachineOSBuild()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting MOSB from MOSC")
o.Eventually(mosb.GetJob, "5m", "20s").Should(Exist(),
"No build pod was created when OCB was enabled")
o.Eventually(mosb, "5m", "20s").Should(HaveConditionField("Building", "status", TrueString),
"MachineOSBuild didn't report that the build has begun")
logger.Infof("OK!\n")
exutil.By("Delete the MCOS and check it is deleted")
o.Expect(mosc.CleanupAndDelete()).To(o.Succeed(), "Error cleaning up %s", mosc)
ValidateMOSCIsGarbageCollected(mosc, infraMcp)
o.Expect(mosb).NotTo(Exist(), "Build is not deleted")
o.Expect(mosc).NotTo(Exist(), "MOSC is not deleted")
logger.Infof("OK!\n")
exutil.By("Check MCC Logs for Panic is not produced")
exutil.AssertAllPodsToBeReady(oc.AsAdmin(), MachineConfigNamespace)
mccPrevLogs, err := mcc.GetPreviousLogs()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting previous MCC logs")
o.Expect(mccPrevLogs).NotTo(o.Or(o.ContainSubstring("panic"), o.ContainSubstring("Panic")), "Panic is seen in MCC previous logs after deleting OCB resources:\n%s", mccPrevLogs)
mccLogs, err := mcc.GetLogs()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting MCC logs")
o.Expect(mccLogs).NotTo(o.Or(o.ContainSubstring("panic"), o.ContainSubstring("Panic")), "Panic is seen in MCC logs after deleting OCB resources:\n%s", mccLogs)
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
1b34f6c8-3a50-4dc9-9620-b495cdef16aa
|
Author:sregidor-ConnectedOnly-Longduration-NonPreRelease-Critical-73496-[P1] OCB use custom Containerfile. New 4.16 OCB API[Disruptive]
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_ocb.go
|
g.It("Author:sregidor-ConnectedOnly-Longduration-NonPreRelease-Critical-73496-[P1] OCB use custom Containerfile. New 4.16 OCB API[Disruptive]", func() {
var (
mcp = GetCompactCompatiblePool(oc.AsAdmin())
containerFileContent = `
# Pull the centos base image and enable the EPEL repository.
FROM quay.io/centos/centos:stream9 AS centos
RUN dnf install -y epel-release
# Pull an image containing the yq utility.
FROM quay.io/multi-arch/yq:4.25.3 AS yq
# Build the final OS image for this MachineConfigPool.
FROM configs AS final
# Copy the EPEL configs into the final image.
COPY --from=yq /usr/bin/yq /usr/bin/yq
COPY --from=centos /etc/yum.repos.d /etc/yum.repos.d
COPY --from=centos /etc/pki/rpm-gpg/RPM-GPG-KEY-* /etc/pki/rpm-gpg/
# Install cowsay and ripgrep from the EPEL repository into the final image,
# along with a custom cow file.
RUN sed -i 's/\$stream/9-stream/g' /etc/yum.repos.d/centos*.repo && \
rpm-ostree install cowsay ripgrep
`
checkers = []Checker{
CommandOutputChecker{
Command: []string{"cowsay", "-t", "hello"},
Matcher: o.ContainSubstring("< hello >"),
ErrorMsg: fmt.Sprintf("Cowsay is not working after installing the new image"),
Desc: fmt.Sprintf("Check that cowsay is installed and working"),
},
}
)
testContainerFile([]ContainerFile{{Content: containerFileContent}}, MachineConfigNamespace, mcp, checkers, false)
})
| |||||
test case
|
openshift/openshift-tests-private
|
90816dd1-93eb-46bc-8c9c-651591b8dc2b
|
Author:sregidor-ConnectedOnly-Longduration-NonPreRelease-Medium-78001-[P2] The etc-pki-entitlement secret is created automatically for OCB Use custom Containerfile with rhel enablement [Disruptive]
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_ocb.go
|
g.It("Author:sregidor-ConnectedOnly-Longduration-NonPreRelease-Medium-78001-[P2] The etc-pki-etitlement secret is created automatically for OCB Use custom Containerfile with rhel enablement [Disruptive]", func() {
var (
entitlementSecret = NewSecret(oc.AsAdmin(), "openshift-config-managed", "etc-pki-entitlement")
containerFileContent = `
FROM configs AS final
RUN rm -rf /etc/rhsm-host && \
rpm-ostree install buildah && \
ln -s /run/secrets/rhsm /etc/rhsm-host && \
ostree container commit
`
checkers = []Checker{
CommandOutputChecker{
Command: []string{"rpm", "-q", "buildah"},
Matcher: o.ContainSubstring("buildah-"),
ErrorMsg: fmt.Sprintf("Buildah package is not installed after the image was deployed"),
Desc: fmt.Sprintf("Check that buildah is installed"),
},
}
mcp = GetCompactCompatiblePool(oc.AsAdmin())
)
if !entitlementSecret.Exists() {
g.Skip(fmt.Sprintf("There is no entitlement secret available in this cluster %s. This test case cannot be executed", entitlementSecret))
}
testContainerFile([]ContainerFile{{Content: containerFileContent}}, MachineConfigNamespace, mcp, checkers, false)
})
| |||||
test case
|
openshift/openshift-tests-private
|
67b95a22-b4d5-441a-825f-4417cf0563fc
|
Author:sregidor-ConnectedOnly-Longduration-NonPreRelease-High-73947-OCB use OutputImage CurrentImagePullSecret [Disruptive]
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_ocb.go
|
g.It("Author:sregidor-ConnectedOnly-Longduration-NonPreRelease-High-73947-OCB use OutputImage CurrentImagePullSecret [Disruptive]", func() {
var (
mcp = GetCompactCompatiblePool(oc.AsAdmin())
tmpNamespaceName = "tc-73947-mco-ocl-images"
checkers = []Checker{
CommandOutputChecker{
Command: []string{"rpm-ostree", "status"},
Matcher: o.ContainSubstring(fmt.Sprintf("%s/%s/ocb-%s-image", InternalRegistrySvcURL, tmpNamespaceName, mcp.GetName())),
ErrorMsg: fmt.Sprintf("The nodes are not using the expected OCL image stored in the internal registry"),
Desc: fmt.Sprintf("Check that the nodes are using the right OS image"),
},
}
)
testContainerFile([]ContainerFile{}, tmpNamespaceName, mcp, checkers, false)
})
| |||||
test case
|
openshift/openshift-tests-private
|
1e123254-ef74-4d55-b97d-aa9add34106e
|
Author:sregidor-ConnectedOnly-Longduration-NonPreRelease-High-72003-[P1] OCB Opting into on-cluster builds must respect maxUnavailable setting. Workers.[Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_ocb.go
|
g.It("Author:sregidor-ConnectedOnly-Longduration-NonPreRelease-High-72003-[P1] OCB Opting into on-cluster builds must respect maxUnavailable setting. Workers.[Disruptive]", func() {
SkipIfSNO(oc.AsAdmin()) // This test makes no sense in SNO
var (
moscName = "test-" + GetCurrentTestPolarionIDNumber()
wMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
workerNodes = wMcp.GetSortedNodesOrFail()
)
exutil.By("Configure maxUnavailable if worker pool has more than 2 nodes")
if len(workerNodes) > 2 {
wMcp.SetMaxUnavailable(2)
defer wMcp.RemoveMaxUnavailable()
}
maxUnavailable := exutil.OrFail[int](wMcp.GetMaxUnavailableInt())
logger.Infof("Current maxUnavailable value %d", maxUnavailable)
logger.Infof("OK!\n")
exutil.By("Configure OCB functionality for the new worker MCP")
mosc, err := CreateMachineOSConfigUsingExternalOrInternalRegistry(oc.AsAdmin(), MachineConfigNamespace, moscName, wMcp.GetName(), nil)
defer DisableOCL(mosc)
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating the MachineOSConfig resource")
logger.Infof("OK!\n")
exutil.By("Configure OCB functionality for the new worker MCP")
o.Eventually(wMcp.GetUpdatingStatus, "15m", "15s").Should(o.Equal("True"),
"The worker MCP did not start updating")
logger.Infof("OK!\n")
exutil.By("Poll the nodes sorted by the order they are updated")
updatedNodes := wMcp.GetSortedUpdatedNodes(maxUnavailable)
for _, n := range updatedNodes {
logger.Infof("updated node: %s created: %s zone: %s", n.GetName(), n.GetOrFail(`{.metadata.creationTimestamp}`), n.GetOrFail(`{.metadata.labels.topology\.kubernetes\.io/zone}`))
}
logger.Infof("OK!\n")
exutil.By("Wait for the configuration to be applied in all nodes")
wMcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Check that nodes were updated in the right order")
rightOrder := checkUpdatedLists(workerNodes, updatedNodes, maxUnavailable)
o.Expect(rightOrder).To(o.BeTrue(), "Expected update order %s, but found order %s", workerNodes, updatedNodes)
logger.Infof("OK!\n")
exutil.By("Remove the MachineOSConfig resource")
o.Expect(DisableOCL(mosc)).To(o.Succeed(), "Error cleaning up %s", mosc)
logger.Infof("OK!\n")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
01719bc5-8822-4181-b6e7-a85897c73465
|
Author:sregidor-ConnectedOnly-Longduration-NonPreRelease-High-73497-[P2] OCB build images in many MCPs at the same time [Disruptive]
|
['"fmt"', '"sync"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_ocb.go
|
g.It("Author:sregidor-ConnectedOnly-Longduration-NonPreRelease-High-73497-[P2] OCB build images in many MCPs at the same time [Disruptive]", func() {
SkipIfCompactOrSNO(oc.AsAdmin()) // This test makes no sense in SNO or compact
var (
customMCPNames = "infra"
numCustomPools = 5
moscList = []*MachineOSConfig{}
mcpList = []*MachineConfigPool{}
wg sync.WaitGroup
)
exutil.By("Create custom MCPS")
for i := 0; i < numCustomPools; i++ {
infraMcpName := fmt.Sprintf("%s-%d", customMCPNames, i)
infraMcp, err := CreateCustomMCP(oc.AsAdmin(), infraMcpName, 0)
defer infraMcp.delete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating a new custom pool: %s", infraMcpName)
mcpList = append(mcpList, infraMcp)
}
logger.Infof("OK!\n")
exutil.By("Checking that all MOSCs were executed properly")
for _, infraMcp := range mcpList {
moscName := fmt.Sprintf("mosc-%s", infraMcp.GetName())
mosc, err := CreateMachineOSConfigUsingExternalOrInternalRegistry(oc.AsAdmin(), MachineConfigNamespace, moscName, infraMcp.GetName(), nil)
defer mosc.CleanupAndDelete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating the MachineOSConfig resource")
moscList = append(moscList, mosc)
wg.Add(1)
go func() {
defer g.GinkgoRecover()
defer wg.Done()
ValidateSuccessfulMOSC(mosc, nil)
}()
}
wg.Wait()
logger.Infof("OK!\n")
exutil.By("Removing all MOSC resources")
for _, mosc := range moscList {
o.Expect(mosc.CleanupAndDelete()).To(o.Succeed(), "Error cleaning up %s", mosc)
}
logger.Infof("OK!\n")
exutil.By("Validate that all resources were garbage collected")
for i := 0; i < numCustomPools; i++ {
ValidateMOSCIsGarbageCollected(moscList[i], mcpList[i])
}
logger.Infof("OK!\n")
exutil.AssertAllPodsToBeReady(oc.AsAdmin(), MachineConfigNamespace)
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
d81ea954-3363-448a-b159-b9e3ab989da0
|
Author:sregidor-Longduration-NonPreRelease-High-77498-OCB Trigger new build when renderedImagePushspec is updated [Disruptive]
|
['"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_ocb.go
|
g.It("Author:sregidor-Longduration-NonPreRelease-High-77498-OCB Trigger new build when renderedImagePushspec is updated [Disruptive]", func() {
var (
infraMcpName = "infra"
moscName = "tc-77498-infra"
)
exutil.By("Create custom infra MCP")
// We add no workers to the infra pool; they are not necessary
infraMcp, err := CreateCustomMCP(oc.AsAdmin(), infraMcpName, 0)
defer infraMcp.delete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating a new custom pool: %s", infraMcpName)
logger.Infof("OK!\n")
exutil.By("Configure OCB functionality for the new infra MCP")
mosc, err := CreateMachineOSConfigUsingExternalOrInternalRegistry(oc.AsAdmin(), MachineConfigNamespace, moscName, infraMcpName, nil)
defer mosc.CleanupAndDelete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating the MachineOSConfig resource")
logger.Infof("OK!\n")
ValidateSuccessfulMOSC(mosc, nil)
exutil.By("Set a new rendered image pull spec")
initialMOSB := exutil.OrFail[*MachineOSBuild](mosc.GetCurrentMachineOSBuild())
initialRIPS := exutil.OrFail[string](mosc.GetRenderedImagePushspec())
o.Expect(
mosc.SetRenderedImagePushspec(strings.ReplaceAll(initialRIPS, "ocb-", "ocb77498-")),
).NotTo(o.HaveOccurred(), "Error patching %s to set the new renderedImagePullSpec", mosc)
logger.Infof("OK!\n")
exutil.By("Check that a new build is triggered")
checkNewBuildIsTriggered(mosc, initialMOSB)
logger.Infof("OK!\n")
exutil.By("Set the original rendered image pull spec")
o.Expect(
mosc.SetRenderedImagePushspec(initialRIPS),
).NotTo(o.HaveOccurred(), "Error patching %s to set the new renderedImagePullSpec", mosc)
logger.Infof("OK!\n")
exutil.By("Check that the initial build is reused")
var currentMOSB *MachineOSBuild
o.Eventually(func() (string, error) {
currentMOSB, err = mosc.GetCurrentMachineOSBuild()
return currentMOSB.GetName(), err
}, "5m", "20s").Should(o.Equal(initialMOSB.GetName()),
"When the containerfiles were removed and initial MOSC configuration was restored, the initial MOSB was not used")
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
a320d410-9816-4c1d-a813-5532e7cf4481
|
Author:sregidor-Longduration-NonPreRelease-High-77497-OCB Trigger new build when Containerfile is updated [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_ocb.go
|
g.It("Author:sregidor-Longduration-NonPreRelease-High-77497-OCB Trigger new build when Containerfile is updated [Disruptive]", func() {
var (
infraMcpName = "infra"
moscName = "tc-77497-infra"
containerFile = ContainerFile{Content: "RUN touch /etc/test-add-containerfile"}
containerFileMod = ContainerFile{Content: "RUN touch /etc/test-modified-containerfile"}
)
exutil.By("Create custom infra MCP")
// We add no workers to the infra pool; they are not necessary
infraMcp, err := CreateCustomMCP(oc.AsAdmin(), infraMcpName, 0)
defer infraMcp.delete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating a new custom pool: %s", infraMcpName)
logger.Infof("OK!\n")
exutil.By("Configure OCB functionality for the new infra MCP")
mosc, err := CreateMachineOSConfigUsingExternalOrInternalRegistry(oc.AsAdmin(), MachineConfigNamespace, moscName, infraMcpName, nil)
defer mosc.CleanupAndDelete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating the MachineOSConfig resource")
logger.Infof("OK!\n")
ValidateSuccessfulMOSC(mosc, nil)
exutil.By("Add new container file")
initialMOSB := exutil.OrFail[*MachineOSBuild](mosc.GetCurrentMachineOSBuild())
o.Expect(
mosc.SetContainerfiles([]ContainerFile{containerFile}),
).NotTo(o.HaveOccurred(), "Error patching %s to add a container file", mosc)
logger.Infof("OK!\n")
exutil.By("Check that a new build is triggered when a containerfile is added")
checkNewBuildIsTriggered(mosc, initialMOSB)
logger.Infof("OK!\n")
exutil.By("Modify the container file")
currentMOSB := exutil.OrFail[*MachineOSBuild](mosc.GetCurrentMachineOSBuild())
o.Expect(
mosc.SetContainerfiles([]ContainerFile{containerFileMod}),
).NotTo(o.HaveOccurred(), "Error patching %s to modify an existing container file", mosc)
logger.Infof("OK!\n")
exutil.By("Check that a new build is triggered when a containerfile is modified")
checkNewBuildIsTriggered(mosc, currentMOSB)
logger.Infof("OK!\n")
exutil.By("Remove the container files")
o.Expect(
mosc.RemoveContainerfiles(),
).NotTo(o.HaveOccurred(), "Error patching %s to remove the configured container files", mosc)
logger.Infof("OK!\n")
exutil.By("Check that the initial build is reused")
o.Eventually(func() (string, error) {
currentMOSB, err = mosc.GetCurrentMachineOSBuild()
return currentMOSB.GetName(), err
}, "5m", "20s").Should(o.Equal(initialMOSB.GetName()),
"When the containerfiles were removed and initial MOSC configuration was restored, the initial MOSB was not used")
logger.Infof("OK!\n")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
016ef1a5-7f56-4e48-8e94-ac9c51af7c82
|
Author:sregidor-Longduration-NonPreRelease-High-77576-In OCB. Create a new MC while a build is running [Disruptive]
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_ocb.go
|
g.It("Author:sregidor-Longduration-NonPreRelease-High-77576-In OCB. Create a new MC while a build is running [Disruptive]", func() {
var (
mcp = GetCompactCompatiblePool(oc.AsAdmin())
node = mcp.GetSortedNodesOrFail()[0]
moscName = "test-" + mcp.GetName() + "-" + GetCurrentTestPolarionIDNumber()
fileMode = "0644" // decimal 420
filePath = "/etc/test-77576"
fileContent = "test file"
mcName = "tc-77576-testfile"
fileConfig = getBase64EncodedFileConfig(filePath, fileContent, fileMode)
mc = NewMachineConfig(oc.AsAdmin(), mcName, mcp.GetName())
)
exutil.By("Configure OCB functionality for the new worker MCP")
mosc, err := CreateMachineOSConfigUsingExternalOrInternalRegistry(oc.AsAdmin(), MachineConfigNamespace, moscName, mcp.GetName(), nil)
defer DisableOCL(mosc)
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating the MachineOSConfig resource")
logger.Infof("OK!\n")
exutil.By("Check that a new build has been triggered and is building")
var mosb *MachineOSBuild
o.Eventually(func() (*MachineOSBuild, error) {
var err error
mosb, err = mosc.GetCurrentMachineOSBuild()
return mosb, err
}, "5m", "20s").Should(Exist(),
"No build was created when OCB was enabled")
o.Eventually(mosb.GetJob, "5m", "20s").Should(Exist(),
"No build job was created when OCB was enabled")
o.Eventually(mosb, "5m", "20s").Should(HaveConditionField("Building", "status", TrueString),
"MachineOSBuild didn't report that the build has begun")
logger.Infof("OK!\n")
exutil.By("Create a MC to trigger a new build")
defer mc.delete()
err = mc.Create("-p", "NAME="+mcName, "-p", "POOL="+mcp.GetName(), "-p", fmt.Sprintf("FILES=[%s]", fileConfig))
o.Expect(err).NotTo(o.HaveOccurred())
logger.Infof("OK!\n")
exutil.By("Check that a new build is triggered and the old build is removed")
checkNewBuildIsTriggered(mosc, mosb)
o.Eventually(mosb, "2m", "20s").ShouldNot(Exist(), "The old MOSB %s was not deleted", mosb)
logger.Infof("OK!\n")
exutil.By("Wait for the configuration to be applied")
mcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Check that the MC was applied")
rf := NewRemoteFile(node, filePath)
o.Eventually(rf, "2m", "20s").Should(HaveContent(o.Equal(fileContent)),
"%s doesn't have the right content", rf)
logger.Infof("OK!\n")
exutil.By("Remove the MachineOSConfig resource")
o.Expect(DisableOCL(mosc)).To(o.Succeed(), "Error cleaning up %s", mosc)
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
22a4f452-3a2f-429a-a166-254f2bd152ed
|
Author:sregidor-Longduration-NonPreRelease-High-77781-OCB Rebuild a successful build [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_ocb.go
|
g.It("Author:sregidor-Longduration-NonPreRelease-High-77781-OCB Rebuild a successful build [Disruptive]", func() {
var (
mcp = GetCompactCompatiblePool(oc.AsAdmin())
moscName = "test-" + mcp.GetName() + "-" + GetCurrentTestPolarionIDNumber()
)
exutil.By("Configure OCB functionality for the new worker MCP")
mosc, err := CreateMachineOSConfigUsingExternalOrInternalRegistry(oc.AsAdmin(), MachineConfigNamespace, moscName, mcp.GetName(), nil)
defer DisableOCL(mosc)
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating the MachineOSConfig resource")
logger.Infof("OK!\n")
ValidateSuccessfulMOSC(mosc, nil)
// rebuild the image and check that the image is properly applied in the nodes
RebuildImageAndCheck(mosc)
exutil.By("Remove the MachineOSConfig resource")
o.Expect(DisableOCL(mosc)).To(o.Succeed(), "Error cleaning up %s", mosc)
logger.Infof("OK!\n")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
3c5290f1-3972-40fc-88dc-8aeaac211148
|
Author:sregidor-Longduration-NonPreRelease-High-77782-[P2] OCB Rebuild an interrupted build [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_ocb.go
|
g.It("Author:sregidor-Longduration-NonPreRelease-High-77782-[P2] OCB Rebuild an interrupted build [Disruptive]", func() {
var (
mcp = GetCompactCompatiblePool(oc.AsAdmin())
moscName = "test-" + mcp.GetName() + "-" + GetCurrentTestPolarionIDNumber()
)
exutil.By("Configure OCB functionality for the new worker MCP")
mosc, err := CreateMachineOSConfigUsingExternalOrInternalRegistry(oc.AsAdmin(), MachineConfigNamespace, moscName, mcp.GetName(), nil)
defer DisableOCL(mosc)
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating the MachineOSConfig resource")
logger.Infof("OK!\n")
exutil.By("Wait until MOSB starts building")
var mosb *MachineOSBuild
var job *Job
o.Eventually(func() (*MachineOSBuild, error) {
var err error
mosb, err = mosc.GetCurrentMachineOSBuild()
return mosb, err
}, "5m", "20s").Should(Exist(),
"No build was created when OCB was enabled")
o.Eventually(func() (*Job, error) {
var err error
job, err = mosb.GetJob()
return job, err
}, "5m", "20s").Should(Exist(),
"No build job was created when OCB was enabled")
o.Eventually(mosb, "5m", "20s").Should(HaveConditionField("Building", "status", TrueString),
"MachineOSBuild didn't report that the build has begun")
logger.Infof("OK!\n")
exutil.By("Interrupt the build")
o.Expect(job.Delete()).To(o.Succeed(),
"Error deleting %s", job)
o.Eventually(mosb, "5m", "20s").Should(HaveConditionField("Interrupted", "status", TrueString),
"MachineOSBuild didn't report that the build has begun")
logger.Infof("OK!\n")
// TODO: what's the intended MCP status when a build is interrupted? We need to check this status here
// rebuild the image and check that the image is properly applied in the nodes
RebuildImageAndCheck(mosc)
exutil.By("Remove the MachineOSConfig resource")
o.Expect(DisableOCL(mosc)).To(o.Succeed(), "Error cleaning up %s", mosc)
logger.Infof("OK!\n")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
f1ecc946-f982-445f-8c9d-10c76c756462
|
Author:ptalgulk-ConnectedOnly-Longduration-NonPreRelease-Medium-77977-Install extension after OCB is enabled [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_ocb.go
|
g.It("Author:ptalgulk-ConnectedOnly-Longduration-NonPreRelease-Medium-77977-Install extension after OCB is enabled [Disruptive]", func() {
var (
moscName = "test-" + GetCurrentTestPolarionIDNumber()
mcp = GetCompactCompatiblePool(oc.AsAdmin())
node = mcp.GetSortedNodesOrFail()[0]
mcName = "test-install-extenstion-" + GetCurrentTestPolarionIDNumber()
)
exutil.By("Configure OCB functionality for the new worker MCP")
mosc, err := CreateMachineOSConfigUsingExternalOrInternalRegistry(oc.AsAdmin(), MachineConfigNamespace, moscName, mcp.GetName(), nil)
defer DisableOCL(mosc)
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating the MachineOSConfig resource")
logger.Infof("OK!\n")
ValidateSuccessfulMOSC(mosc, nil)
exutil.By("Create a MC")
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker)
mc.SetParams(`EXTENSIONS=["usbguard"]`)
defer mc.delete()
mc.create()
exutil.By("Wait for the configuration to be applied")
mcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Verify worker node includes usbguard extenstion")
o.Expect(
node.DebugNodeWithChroot("rpm", "-q", "usbguard"),
).Should(o.ContainSubstring("usbguard-"), "usbguard has not been installed")
logger.Infof("OK!\n")
exutil.By("Delete a MC.")
mc.delete()
logger.Infof("OK!\n")
exutil.By("Remove the MachineOSConfig resource")
o.Expect(DisableOCL(mosc)).To(o.Succeed(), "Error cleaning up %s", mosc)
ValidateMOSCIsGarbageCollected(mosc, mcp)
logger.Infof("OK!\n")
})
| ||||||
test case
|
openshift/openshift-tests-private
|
ac85e329-4ae2-4256-9d34-b52358205371
|
Author:ptalgulk-ConnectedOnly-Longduration-NonPreRelease-Medium-78196-Verify the etc-pki-entitlement secret is removed for OCB rhel enablement [Disruptive]
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_ocb.go
|
g.It("Author:ptalgulk-ConnectedOnly-Longduration-NonPreRelease-Medium-78196-Verify for etc-pki-etitlement secret is removed for OCB rhel enablement [Disruptive]", func() {
var (
entitlementSecret = NewSecret(oc.AsAdmin(), "openshift-config-managed", "etc-pki-entitlement")
containerFileContent = `
FROM configs AS final
RUN rm -rf /etc/rhsm-host && \
rpm-ostree install buildah && \
ln -s /run/secrets/rhsm /etc/rhsm-host && \
ostree container commit
`
mcp = GetCompactCompatiblePool(oc.AsAdmin())
)
if !entitlementSecret.Exists() {
g.Skip(fmt.Sprintf("There is no entitlement secret available in this cluster %s. This test case cannot be executed", entitlementSecret))
}
exutil.By("Copy the entitlement secret in MCO namespace")
mcoEntitlementSecret, err := CloneResource(entitlementSecret, "etc-pki-entitlement", MachineConfigNamespace, nil)
defer mcoEntitlementSecret.Delete()
o.Expect(err).NotTo(o.HaveOccurred(), "Error copying %s to the %s namespace", mcoEntitlementSecret, MachineConfigNamespace)
logger.Infof("OK!\n")
exutil.By("Delete the entitlement secret in the openshift-config-managed namespace")
defer func() {
exutil.By("Recover the entitlement secret in the openshift-config-managed namespace")
recoverSecret, err := CloneResource(mcoEntitlementSecret, "etc-pki-entitlement", "openshift-config-managed", nil)
o.Expect(err).NotTo(o.HaveOccurred(), "Error copying %s to the openshift-config-managed namespace", entitlementSecret)
o.Expect(recoverSecret).To(Exist(), "Unable to recover the entitlement secret in openshift-config-managed namespace")
}()
entitlementSecret.Delete()
logger.Infof("OK!\n")
exutil.By("Create the MOSC")
mosc, err := CreateMachineOSConfigUsingExternalOrInternalRegistry(oc.AsAdmin(), MachineConfigNamespace, "test-78196-mosc", mcp.GetName(), []ContainerFile{{Content: containerFileContent}})
defer DisableOCL(mosc)
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating the MachineOSConfig resource")
logger.Infof("OK!\n")
exutil.By("Check that a new build has been triggered")
o.Eventually(mosc.GetCurrentMachineOSBuild, "5m", "20s").Should(Exist(),
"No build was created when OCB was enabled")
mosb, err := mosc.GetCurrentMachineOSBuild()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting MOSB from MOSC")
o.Eventually(mosb, "5m", "20s").Should(HaveConditionField("Building", "status", TrueString),
"MachineOSBuild didn't report that the build has begun")
logger.Infof("OK!\n")
exutil.By("Verify the error is produced in buildPod")
exutil.AssertAllPodsToBeReady(oc.AsAdmin(), MachineConfigNamespace)
logger.Infof("OK!\n")
job, err := mosb.GetJob()
o.Expect(err).NotTo(o.HaveOccurred())
// Currently this kind of resources are leaked. Until the leak is fixed we need to make sure that this job is removed
// because its pods are in "Error" status and there are other test cases checking that no pod is reporting any error.
// TODO: remove this once the leak is fixed
defer job.Delete()
logger.Infof("OK!\n")
o.Eventually(job.Logs, "5m", "10s").Should(o.ContainSubstring("Found 0 entitlement certificates"), "Error getting the logs")
o.Eventually(job, "15m", "20s").Should(HaveConditionField("Failed", "status", TrueString), "Job didn't fail")
logger.Infof("OK!\n")
exutil.By("Remove the MachineOSConfig resource")
o.Expect(DisableOCL(mosc)).To(o.Succeed(), "Error cleaning up %s", mosc)
ValidateMOSCIsGarbageCollected(mosc, mcp)
logger.Infof("OK!\n")
})
| |||||
test
|
openshift/openshift-tests-private
|
e7db84cb-5100-4b8b-aa07-1058e8a51fc4
|
mco_password
|
import (
"fmt"
"path/filepath"
"regexp"
expect "github.com/google/goexpect"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_password.go
|
package mco
import (
"fmt"
"path/filepath"
"regexp"
expect "github.com/google/goexpect"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
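// This suite covers password and SSH-key management handled by MCO: creating, updating and removing the 'core'
// user password via MachineConfig on CoreOS and RHEL nodes, rejecting passwords for non-core users, validating the
// authorized_keys location on RHCOS9, updating and removing ssh keys, and ensuring password login is not allowed
// over ssh.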
var _ = g.Describe("[sig-mco] MCO password", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("mco-password", exutil.KubeConfigPath())
passwordHash string
updatedPasswdHash string
user string
password string
updatedPassword string
wMcp *MachineConfigPool
mMcp *MachineConfigPool
// Compact compatible MCP. If the node is compact/SNO this variable will be the master pool, else it will be the worker pool
mcp *MachineConfigPool
)
g.JustBeforeEach(func() {
passwordHash = "$6$uim4LuKWqiko1l5K$QJUwg.4lAyU4egsM7FNaNlSbuI6JfQCRufb99QuF082BpbqFoHP3WsWdZ5jCypS0veXWN1HDqO.bxUpE9aWYI1" // sha-512 "coretest"
updatedPasswdHash = "$6$sGXk8kzDPwf165.v$9Oc0fXJpFmUy8cSZzzjrW7pDQwaYbPojAR7CHAKRl81KDYrk2RQrcFI9gLfhfrPMHI2WuX4Us6ZBkO1KfF48/." // sha-512 "coretest2"
user = "core"
password = "coretest"
updatedPassword = "coretest2"
wMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
mMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)
mcp = GetCompactCompatiblePool(oc.AsAdmin())
preChecks(oc)
})
g.It("Author:sregidor-NonPreRelease-Longduration-High-59417-[P1][OnCLayer] MCD create/update password with MachineConfig in CoreOS nodes[Disruptive]", func() {
var (
mcName = "tc-59417-test-core-passwd"
mcc = NewController(oc.AsAdmin()).IgnoreLogsBeforeNowOrFail()
isOCL = exutil.OrFail[bool](mcp.IsOCL())
)
allCoreos := mcp.GetCoreOsNodesOrFail()
if len(allCoreos) == 0 {
logger.Infof("No CoreOs nodes are configured in the pool %s. We use master pool for testing", mcp.GetName())
mcp = mMcp
allCoreos = mcp.GetCoreOsNodesOrFail()
}
node := allCoreos[0]
startTime := node.GetDateOrFail()
exutil.By("Configure a password for 'core' user")
_, _ = node.GetDate() // for debugging purposes, it prints the node's current time in the logs
o.Expect(node.IgnoreEventsBeforeNow()).NotTo(o.HaveOccurred(),
"Error getting the latest event in node %s", node.GetName())
mc := NewMachineConfig(oc.AsAdmin(), mcName, mcp.GetName())
mc.parameters = []string{fmt.Sprintf(`PWDUSERS=[{"name":"%s", "passwordHash": "%s" }]`, user, passwordHash)}
mc.skipWaitForMcp = true
defer mc.delete()
mc.create()
mcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Check MCD logs to make sure drain and reboot are skipped unless OCL")
checkDrainAction(isOCL, node, mcc)
checkRebootAction(isOCL, node, startTime)
logger.Infof("OK!\n")
if !isOCL {
exutil.By("Check events to make sure that drain and reboot events were not triggered")
nodeEvents, eErr := node.GetEvents()
o.Expect(eErr).ShouldNot(o.HaveOccurred(), "Error getting drain events for node %s", node.GetName())
o.Expect(nodeEvents).NotTo(HaveEventsSequence("Drain"), "Error, a Drain event was triggered but it shouldn't")
o.Expect(nodeEvents).NotTo(HaveEventsSequence("Reboot"), "Error, a Reboot event was triggered but it shouldn't")
logger.Infof("OK!\n")
} else {
logger.Infof("OCL pool, skipping events checks")
}
exutil.By("Verify that user 'core' can login with the configured password")
logger.Infof("verifying node %s", node.GetName())
bresp, err := node.ExecuteDebugExpectBatch(DefaultExpectTimeout, getPasswdValidator(user, password))
o.Expect(err).NotTo(o.HaveOccurred(), "Error in the login process in node %s:\n %s", node.GetName(), bresp)
logger.Infof("OK!\n")
exutil.By("Update the password value")
patchErr := mc.Patch("json",
fmt.Sprintf(`[{ "op": "add", "path": "/spec/config/passwd/users/0/passwordHash", "value": "%s"}]`, updatedPasswdHash))
o.Expect(patchErr).NotTo(o.HaveOccurred(),
"Error patching mc %s to update the 'core' user password")
mcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Verify that user 'core' can login with the new password")
logger.Infof("verifying node %s", node.GetName())
bresp, err = node.ExecuteDebugExpectBatch(DefaultExpectTimeout, getPasswdValidator(user, updatedPassword))
o.Expect(err).NotTo(o.HaveOccurred(), "Error in the login process in node %s:\n %s", node.GetName(), bresp)
logger.Infof("OK!\n")
exutil.By("Remove the password")
mc.deleteNoWait()
mcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Verify that user 'core' can not login using a password anymore")
logger.Infof("verifying node %s", node.GetName())
bresp, err = node.ExecuteDebugExpectBatch(DefaultExpectTimeout, getPasswdValidator(user, updatedPassword))
o.Expect(err).To(o.HaveOccurred(), "User 'core' was able to login using a password in node %s, but it should not be possible:\n %s", node.GetName(), bresp)
logger.Infof("OK!\n")
})
g.It("Author:sregidor-NonPreRelease-Longduration-High-60129-[P2][OnCLayer] MCD create/update password with MachineConfig in RHEL nodes[Disruptive]", func() {
var (
mcName = "tc-60129-test-core-passwd"
)
allRhelNodes := NewNodeList(oc).GetAllRhelWokerNodesOrFail()
if len(allRhelNodes) == 0 {
g.Skip("There are no rhel worker nodes in this cluster")
}
allWorkerNodes := NewNodeList(oc).GetAllLinuxWorkerNodesOrFail()
exutil.By("Create the 'core' user in RHEL nodes")
for _, rhelWorker := range allRhelNodes {
// copy the loop variable so that the deferred cleanup does not capture a reassigned value
if !rhelWorker.UserExists(user) {
worker := rhelWorker
defer func() { worker.UserDel(user) }()
o.Expect(worker.UserAdd(user)).NotTo(o.HaveOccurred(),
"Error creating user in node %s", worker.GetName())
} else {
logger.Infof("User %s already exists in node %s. Skip creation.", user, rhelWorker.GetName())
}
}
exutil.By("Configure a password for 'core' user")
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker)
mc.parameters = []string{fmt.Sprintf(`PWDUSERS=[{"name":"%s", "passwordHash": "%s" }]`, user, passwordHash)}
mc.skipWaitForMcp = true
defer mc.delete()
mc.create()
wMcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Verify that user 'core' can login with the configured password")
for _, workerNode := range allWorkerNodes {
logger.Infof("Verifying node %s", workerNode.GetName())
bresp, err := workerNode.ExecuteDebugExpectBatch(DefaultExpectTimeout, getPasswdValidator(user, password))
o.Expect(err).NotTo(o.HaveOccurred(), "Error in the login process in node %s:\n %s", workerNode.GetName(), bresp)
}
logger.Infof("OK!\n")
exutil.By("Update the password value")
patchErr := mc.Patch("json",
fmt.Sprintf(`[{ "op": "add", "path": "/spec/config/passwd/users/0/passwordHash", "value": "%s"}]`, updatedPasswdHash))
o.Expect(patchErr).NotTo(o.HaveOccurred(),
"Error patching mc %s to update the 'core' user password")
wMcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Verify that user 'core' can login with the new password")
for _, workerNode := range allWorkerNodes {
logger.Infof("Verifying node %s", workerNode.GetName())
bresp, err := workerNode.ExecuteDebugExpectBatch(DefaultExpectTimeout, getPasswdValidator(user, updatedPassword))
o.Expect(err).NotTo(o.HaveOccurred(), "Error in the login process in node %s:\n %s", workerNode.GetName(), bresp)
}
logger.Infof("OK!\n")
exutil.By("Remove the password")
mc.deleteNoWait()
wMcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Verify that user 'core' can not login using a password anymore")
for _, workerNode := range allWorkerNodes {
logger.Infof("Verifying node %s", workerNode.GetName())
bresp, err := workerNode.ExecuteDebugExpectBatch(DefaultExpectTimeout, getPasswdValidator(user, updatedPassword))
o.Expect(err).To(o.HaveOccurred(), "User 'core' was able to login using a password in node %s, but it should not be possible:\n %s", workerNode.GetName(), bresp)
}
logger.Infof("OK!\n")
})
g.It("Author:sregidor-NonPreRelease-Longduration-Medium-72137-[OnCLayer] Create a password for a user different from 'core' user[Disruptive]", func() {
var (
mcName = "mco-tc-59900-wrong-user-password"
wrongUser = "root"
passwordHash = "fake-hash"
expectedRDReason = ""
expectedRDMessage = regexp.QuoteMeta(`ignition passwd user section contains unsupported changes: non-core user`)
)
exutil.By("Create a password for a non-core user using a MC")
mc := NewMachineConfig(oc.AsAdmin(), mcName, mcp.GetName())
mc.parameters = []string{fmt.Sprintf(`PWDUSERS=[{"name":"%s", "passwordHash": "%s" }]`, wrongUser, passwordHash)}
mc.skipWaitForMcp = true
validateMcpRenderDegraded(mc, mcp, expectedRDMessage, expectedRDReason)
})
g.It("Author:sregidor-NonPreRelease-Longduration-High-59424-[P1][OnCLayer] ssh keys can be found in new dir on RHCOS9 node [Disruptive]", func() {
var (
allCoreOsNodes = wMcp.GetCoreOsNodesOrFail()
allMasters = mMcp.GetNodesOrFail()
)
skipTestIfRHELVersion(allCoreOsNodes[0], "<", "9.0")
exutil.By("Get currently configured authorizedkeys in the cluster")
wMc, err := wMcp.GetConfiguredMachineConfig()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the current configuration for worker pool")
mMc, err := mMcp.GetConfiguredMachineConfig()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the current configuration for master pool")
workerKeys, err := wMc.GetAuthorizedKeysByUserAsList("core")
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the current authorizedkeys for user 'core' in worker pool")
masterKeys, err := mMc.GetAuthorizedKeysByUserAsList("core")
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the current authorizedkeys for user 'core' in master pool")
logger.Infof("Number of AuthorizedKeys configured for worker nodes: %d", len(workerKeys))
logger.Infof("Number of AuthorizedKeys configured for master nodes: %d", len(masterKeys))
logger.Infof("Ok!\n")
exutil.By("Check the authorized key files in the nodes")
logger.Infof("CHECKING AUTHORIZED KEYS FILE IN COREOS WORKER POOL")
for _, worker := range allCoreOsNodes {
logger.Infof("Checking authorized keys file in node:%s", worker.GetName())
checkAuthorizedKeyInNode(worker, workerKeys)
logger.Infof("Ok!\n")
}
logger.Infof("CHECKING AUTHORIZED KEYS FILE IN MASTER POOL")
for _, master := range allMasters {
logger.Infof("Checking authorized keys file in node:%s", master.GetName())
checkAuthorizedKeyInNode(master, masterKeys)
logger.Infof("Ok!\n")
}
})
g.It("Author:sregidor-LEVEL0-WRS-NonPreRelease-Longduration-Critical-59426-V-BR.26-[P2][OnCLayer] ssh keys can be updated in new dir on RHCOS9 node[Disruptive]", func() {
var (
mcName = "tc-59426-add-ssh-key"
key1 = `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPmGf/sfIYog1KaHj50H0vaDRITn4Wa8RN9bgc2jj6SejvxhAWZVc4BrRst6BdhGr34IowkZmz76ba9jfa4nGm2HNd+CGqf6KmUhwPjF9oJNjy3z5zT2i903OZii35MUnJl056YXgKYpN96WAD5LVOKop/+7Soxq4PW8TtVZeSpHiPNI28XiIdyqGLzJerhlgPLZBsNO0JcVH1DYLd/c4fh5GDLutszZH/dzAX5RmvN1P/cHie+BnkbgNx91NbrOLTrV5m3nY2End5uGDl8zhaGQ2BX2TmnMqWyxYkYuzNmQFprHMNCCpqLshFGRvCFZGpc6L/72mlpcJubzBF0t5Z [email protected]`
key2 = `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDf7nk9SKloQktDuu0DFDrWv8zRROnxKT04DQdz0RRWXwKyQWFbXi2t7MPkYHb+H7BfuCF8gd3BsfZbGenmRpHrm99bjbZWV6tyyyOWac88RGDXwTeSdcdgZoVDIQfW0S4/y7DP6uo6QGyZEh+s+VTGg8gcqm9L2GkjlA943UWUTyRIVQdex8qbtKdAI0NqYtAzuf1zYDGBob5/BdjT856dF7dDCJG36+d++VRXcyhE+SYxGdEC+OgYwRXjz3+J7XixvTAeY4DdGQOeppjOC/E+0TXh5T0m/+LfCJQCClSYvuxIKPkiMvmNHY4q4lOZUL1/FKIS2pn0P6KsqJ98JvqV [email protected]`
user = ign32PaswdUser{Name: "core", SSHAuthorizedKeys: []string{key1, key2}}
node = mcp.GetCoreOsNodesOrFail()[0]
mcc = NewController(oc.AsAdmin()).IgnoreLogsBeforeNowOrFail()
isOCL = exutil.OrFail[bool](mcp.IsOCL())
)
skipTestIfRHELVersion(node, "<", "9.0")
exutil.By("Get currently configured authorizedkeys in the cluster")
currentMc, err := mcp.GetConfiguredMachineConfig()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the current configuration for %s pool", mcp.GetName())
initialKeys, err := currentMc.GetAuthorizedKeysByUserAsList("core")
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the current authorizedkeys for user 'core' in %s pool", mcp.GetName())
logger.Infof("Number of initially configured AuthorizedKeys: %d", len(initialKeys))
logger.Infof("OK!\n")
exutil.By("Get start time and start collecting events.")
startTime, dErr := node.GetDate()
o.Expect(dErr).ShouldNot(o.HaveOccurred(), "Error getting date in node %s", node.GetName())
o.Expect(node.IgnoreEventsBeforeNow()).NotTo(o.HaveOccurred(),
"Error getting the latest event in node %s", node.GetName())
logger.Infof("OK!\n")
exutil.By("Create a new MC to deploy new authorized keys")
mc := NewMachineConfig(oc.AsAdmin(), mcName, mcp.GetName())
mc.parameters = []string{fmt.Sprintf(`PWDUSERS=[%s]`, MarshalOrFail(user))}
mc.skipWaitForMcp = true
defer mc.delete()
mc.create()
mcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Check that nodes are not drained nor rebooted")
checkDrainAction(isOCL, node, mcc)
logger.Infof("OK!\n")
exutil.By("Verify that the node was NOT rebooted")
checkRebootAction(isOCL, node, startTime)
logger.Infof("OK!\n")
if !isOCL {
exutil.By("Check events to make sure that drain and reboot events were not triggered")
nodeEvents, eErr := node.GetEvents()
o.Expect(eErr).ShouldNot(o.HaveOccurred(), "Error getting drain events for node %s", node.GetName())
o.Expect(nodeEvents).NotTo(HaveEventsSequence("Drain"), "Error, a Drain event was triggered but it shouldn't")
o.Expect(nodeEvents).NotTo(HaveEventsSequence("Reboot"), "Error, a Reboot event was triggered but it shouldn't")
logger.Infof("OK!\n")
}
exutil.By("Check that all expected keys are present")
checkAuthorizedKeyInNode(mcp.GetCoreOsNodesOrFail()[0], append(initialKeys, key1, key2))
logger.Infof("OK!\n")
exutil.By("Delete the MC with the new authorized keys")
mc.delete()
mcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Check that the new authorized keys are removed but the original keys are still present")
checkAuthorizedKeyInNode(node, initialKeys)
logger.Infof("OK!\n")
})
g.It("Author:sregidor-NonPreRelease-Longduration-Medium-62533-[OnCLayer] Passwd login must not work with ssh[Disruptive]", func() {
var (
mcName = "tc-62533-test-passwd-ssh-login"
)
allCoreos := mcp.GetCoreOsNodesOrFail()
if len(allCoreos) == 0 {
logger.Infof("No CoreOs nodes are configured in the %s pool. We use master pool for testing", mcp.GetName())
mcp = mMcp
allCoreos = mcp.GetCoreOsNodesOrFail()
}
node := allCoreos[0]
exutil.By("Configure a password for 'core' user")
mc := NewMachineConfig(oc.AsAdmin(), mcName, mcp.GetName())
mc.parameters = []string{fmt.Sprintf(`PWDUSERS=[{"name":"%s", "passwordHash": "%s" }]`, user, passwordHash)}
mc.skipWaitForMcp = true
defer mc.delete()
mc.create()
mcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Check that the password cannot be used to login to the cluster via ssh")
logger.Infof("verifying node %s", node.GetName())
bresp, err := node.ExecuteDebugExpectBatch(DefaultExpectTimeout, getPasswdSSHValidator(user))
o.Expect(err).NotTo(o.HaveOccurred(), "Ssh login should not be allowed in node %s and should report a 'permission denied' error:\n %s", node.GetName(), bresp)
logger.Infof("OK!\n")
})
g.It("Author:sregidor-NonPreRelease-Longduration-Medium-64986-[P1][OnCLayer] Remove all ssh keys [Disruptive]", func() {
var (
sshMCName = "99-" + mcp.GetName() + "-ssh"
backupMCFile = filepath.Join(e2e.TestContext.OutputDir, "tc-64986-"+sshMCName+".backup.json")
)
exutil.By("Get currently configured authorizedkeys in the cluster")
currentMc, err := mcp.GetConfiguredMachineConfig()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the current configuration for %s pool", mcp.GetName())
initialKeys, err := currentMc.GetAuthorizedKeysByUserAsList("core")
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the current authorizedkeys for user 'core' in %s pool", mcp.GetName())
logger.Infof("Number of initially configured AuthorizedKeys: %d", len(initialKeys))
if len(initialKeys) > 1 {
logger.Infof("There is more than 1 ssh key configred in this cluster. Probably they have been added manually.")
g.Skip("There are more than 1 ssh key configured. The cluster has been probably manually modified. Check the configured ssh keys before running this test.")
}
logger.Infof("OK!\n")
exutil.By("Remove the ssh key MachineConfig")
sshMC := NewMachineConfig(oc.AsAdmin(), sshMCName, mcp.GetName())
// If the cluster was created with a ssh key, we remove it and force no sshkey in the cluster
if sshMC.Exists() {
logger.Infof("Save MC information in file: %s", backupMCFile)
o.Expect(sshMC.ExportToFile(backupMCFile)).To(o.Succeed(),
"It was not possible to save MC %s in file %s", sshMC.GetName(), backupMCFile)
defer func() {
logger.Infof("Restore the removed MC")
if !sshMC.Exists() {
OCCreate(oc.AsAdmin(), backupMCFile)
logger.Infof("Wait for MCP to be updated")
mcp.waitForComplete()
}
}()
sshMC.delete()
logger.Infof("OK!\n")
exutil.By("Check that the nodes have the correct configuration for ssh keys. No key configured.")
checkAuthorizedKeyInNode(mcp.GetCoreOsNodesOrFail()[0], []string{})
logger.Infof("OK!\n")
exutil.By("Restore the deleted MC")
o.Expect(OCCreate(oc.AsAdmin(), backupMCFile)).To(o.Succeed(),
"The deleted MC could not be restored")
mcp.waitForComplete()
logger.Infof("OK!\n")
} else {
logger.Infof("MachineConfig %s does not exist. No need to remove it", sshMC.GetName())
}
exutil.By("Check that the nodes have the correct configuration for ssh keys. Original keys.")
checkAuthorizedKeyInNode(mcp.GetCoreOsNodesOrFail()[0], initialKeys)
logger.Infof("OK!\n")
})
g.It("Author:sregidor-NonPreRelease-High-75552-[P2][OnCLayer] apply ssh keys when root owns .ssh [Disruptive]", func() {
var (
node = mcp.GetSortedNodesOrFail()[0]
authKeysdDir = NewRemoteFile(node, "/home/core/.ssh/authorized_keys.d")
sshDir = NewRemoteFile(node, "/home/core/.ssh")
mcName = "tc-75552-ssh"
key = `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDf7nk9SKloQktDuu0DFDrWv8zRROnxKT04DQdz0RRWXwKyQWFbXi2t7MPkYHb+H7BfuCF8gd3BsfZbGenmRpHrm99bjbZWV6tyyyOWac88RGDXwTeSdcdgZoVDIQfW0S4/y7DP6uo6QGyZEh+s+VTGg8gcqm9L2GkjlA943UWUTyRIVQdex8qbtKdAI0NqYtAzuf1zYDGBob5/BdjT856dF7dDCJG36+d++VRXcyhE+SYxGdEC+OgYwRXjz3+J7XixvTAeY4DdGQOeppjOC/E+0TXh5T0m/+LfCJQCClSYvuxIKPkiMvmNHY4q4lOZUL1/FKIS2pn0P6KsqJ98JvqV [email protected]`
user = ign32PaswdUser{Name: "core", SSHAuthorizedKeys: []string{key}}
)
exutil.By("Get currently configured authorizedkeys in the cluster")
currentMc, err := mcp.GetConfiguredMachineConfig()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the current configuration for %s pool", mcp.GetName())
initialKeys, err := currentMc.GetAuthorizedKeysByUserAsList("core")
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the current authorizedkeys for user 'core' in %s pool", mcp.GetName())
logger.Infof("Number of initially configured AuthorizedKeys: %d", len(initialKeys))
logger.Infof("OK!\n")
exutil.By("Remove the authorized keys file from the node")
o.Expect(authKeysdDir.Rm("-rf")).To(o.Succeed(),
"Error removing %s", authKeysdDir)
logger.Infof("OK!\n")
exutil.By("Set root as the owner of the .ssh directory")
o.Expect(sshDir.PushNewOwner("root:root")).To(o.Succeed(),
"Error setting root owner in %s", sshDir)
logger.Infof("OK!\n")
// For debugging purposes
s, _ := node.DebugNodeWithChroot("ls", "-larth", "/home/core/.ssh")
logger.Infof("list /home/core/.ssh: \n %s", s)
exutil.By("Create a new MC to deploy new authorized keys")
mc := NewMachineConfig(oc.AsAdmin(), mcName, mcp.GetName())
mc.parameters = []string{fmt.Sprintf(`PWDUSERS=[%s]`, MarshalOrFail(user))}
mc.skipWaitForMcp = true
defer mc.delete()
mc.create()
mcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Check that all expected keys are present and with the right permissions and owners")
// This function checks the owners and the permissions in the .ssh and authorized_keys.d directories
checkAuthorizedKeyInNode(mcp.GetCoreOsNodesOrFail()[0], append(initialKeys, key))
logger.Infof("OK!\n")
})
})
// getPasswdValidator returns the commands that need to be executed in an interactive expect shell to validate that a user can login
func getPasswdValidator(user, passwd string) []expect.Batcher {
return []expect.Batcher{
&expect.BExpT{R: "#", T: 120}, // wait for prompt. We wait 120 seconds here, because the debug pod can take some time to be run
// in the rest of the commands we use the default timeout
&expect.BSnd{S: "chroot /host\n"}, // execute the chroot command
// &expect.BExp{R: "#"}, // wait for prompt
&expect.BExp{R: ".*"}, // wait for any prompt or no prompt (sometimes it does not return a prompt)
&expect.BSnd{S: fmt.Sprintf(`su %s -c "su %s -c 'echo OK'"`, user, user) + "\n"}, // run an echo command forcing the user authentication
&expect.BExp{R: "[pP]assword:"}, // wait for password question
&expect.BSnd{S: fmt.Sprintf("%s\n", passwd)}, // write the password
&expect.BExp{R: `OK`}, // wait for success message
}
}
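// Why getPasswdValidator nests two 'su' calls: a minimal explanatory note, assuming the debug shell runs
// as root after 'chroot /host'. When root runs 'su <user>' no password is requested, so only the inner
// 'su <user> -c ...', executed as the unprivileged user, forces the password prompt that the batcher
// asserts on before expecting the final "OK".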
// getPasswdSSHValidator returns the commands that need to be executed in an interactive expect shell to validate that a user can NOT login via ssh
func getPasswdSSHValidator(user string) []expect.Batcher {
return []expect.Batcher{
&expect.BExpT{R: "#", T: 120}, // wait for prompt. We wait 120 seconds here, because the debug pod can take some time to be run
&expect.BSnd{S: fmt.Sprintf("chroot /host ssh -o StrictHostKeyChecking=no %[email protected]\n", user)}, // run a ssh login command
&expect.BExp{R: `Permission denied`}, // wait for the login to be rejected because of permission denied
}
}
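// Why getPasswdSSHValidator only expects 'Permission denied': the assumption here is that sshd on the
// nodes has password authentication disabled, so an ssh attempt that cannot offer a valid key is rejected
// outright even when the 'core' user has a valid password configured. The validator therefore never sends
// a password; it just waits for the rejection message.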
func checkAuthorizedKeyInNode(node Node, keys []string) {
logger.Infof("Checking old file /home/core/.ssh/authorized_keys")
rOldAuthorizedFile := NewRemoteFile(node, "/home/core/.ssh/authorized_keys")
o.Expect(rOldAuthorizedFile.Fetch()).ShouldNot(o.Succeed(),
"Old format authorized keys /home/core/.ssh/authorized_keys should not exist in node %s", node.GetName())
// If no key exists and .ssh directory does not exist either, then we have nothing to validate
if len(keys) == 0 {
logger.Infof("No authorized key is configured for node %s. Checking .ssh directory.", node.GetName())
rSSHDir := NewRemoteFile(node, "/home/core/.ssh")
if rSSHDir.Fetch() != nil {
logger.Infof("No authorized key is configured and /home/core/.ssh directory does not exist in node %s", node.GetName())
return
}
}
logger.Infof("Checking /home/core/.ssh")
rSSHDir := NewRemoteFile(node, "/home/core/.ssh")
o.Expect(rSSHDir.Fetch()).To(o.Succeed(), "/home/core/.ssh cannot be found in node %s", node.GetName())
o.Expect(rSSHDir.GetUIDName()).To(o.Equal("core"), "The user owner of /home/core/.ssh should be 'core' user in node %s", node.GetName())
o.Expect(rSSHDir.GetGIDName()).To(o.Equal("core"), "The group owner of /home/core/.ssh should be 'core' group in node %s", node.GetName())
o.Expect(rSSHDir.GetNpermissions()).To(o.Equal("0700"), "Wrong permissions in /home/core/.ssh file in node %s", node.GetName())
logger.Infof("Checking /home/core/.ssh/authorized_keys.d")
rAuthKeysDir := NewRemoteFile(node, "/home/core/.ssh/authorized_keys.d")
o.Expect(rAuthKeysDir.Fetch()).To(o.Succeed(), "/home/core/.ssh/authorized_keys.d cannot be found in node %s", node.GetName())
o.Expect(rAuthKeysDir.GetUIDName()).To(o.Equal("core"), "The user owner of /home/core/.ssh/authorized_keys.d should be 'core' user in node %s", node.GetName())
o.Expect(rAuthKeysDir.GetGIDName()).To(o.Equal("core"), "The group owner of /home/core/.ssh/authorized_keys.d should be 'core' group in node %s", node.GetName())
o.Expect(rAuthKeysDir.GetNpermissions()).To(o.Equal("0700"), "Wrong permissions in /home/core/.ssh/authorized_keys.d directory in node %s", node.GetName())
logger.Infof("Checking /home/core/.ssh/authorized_keys.d/ignition")
rIgnition := NewRemoteFile(node, "/home/core/.ssh/authorized_keys.d/ignition")
o.Expect(rIgnition.Fetch()).To(o.Succeed(), "/home/core/.ssh/authorized_keys.d/ignition cannot be found in node %s", node.GetName())
o.Expect(rIgnition.GetUIDName()).To(o.Equal("core"), "The user owner of /home/core/.ssh/authorized_keys.d/ignition should be 'core' user in node %s", node.GetName())
o.Expect(rIgnition.GetGIDName()).To(o.Equal("core"), "The group owner of /home/core/.ssh/authorized_keys.d/ignition should be 'core' group in node %s", node.GetName())
o.Expect(rIgnition.GetNpermissions()).To(o.Equal("0600"), "Wrong permissions in /home/core/.ssh/authorized_keys.d/ignition file in node %s", node.GetName())
if len(keys) > 0 {
for _, key := range keys {
o.Expect(rIgnition.GetTextContent()).To(o.ContainSubstring(key),
"A expected key does not exist. Wrong content in /home/core/.ssh/authorized_keys.d/ignition file in node %s", node.GetName())
}
} else {
o.Expect(rIgnition.GetTextContent()).To(o.BeEmpty(),
"File should be empty, but it is not. Wrong content in /home/core/.ssh/authorized_keys.d/ignition file in node %s", node.GetName())
}
}
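// In summary, the layout verified by checkAuthorizedKeyInNode (for the RHCOS 9 authorized_keys.d scheme) is:
// /home/core/.ssh core:core 0700
// /home/core/.ssh/authorized_keys.d core:core 0700
// /home/core/.ssh/authorized_keys.d/ignition core:core 0600 (must contain every expected key)
// and the legacy /home/core/.ssh/authorized_keys file must not exist at all.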
|
package mco
| ||||
function
|
openshift/openshift-tests-private
|
aff1611d-e2b1-4a34-90f2-5dcc64d7b003
|
getPasswdValidator
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_password.go
|
func getPasswdValidator(user, passwd string) []expect.Batcher {
return []expect.Batcher{
&expect.BExpT{R: "#", T: 120}, // wait for prompt. We wait 120 seconds here, because the debug pod can take some time to be run
// in the rest of the commands we use the default timeout
&expect.BSnd{S: "chroot /host\n"}, // execute the chroot command
// &expect.BExp{R: "#"}, // wait for prompt
&expect.BExp{R: ".*"}, // wait for any prompt or no prompt (sometimes it does not return a prompt)
&expect.BSnd{S: fmt.Sprintf(`su %s -c "su %s -c 'echo OK'"`, user, user) + "\n"}, // run an echo command forcing the user authentication
&expect.BExp{R: "[pP]assword:"}, // wait for password question
&expect.BSnd{S: fmt.Sprintf("%s\n", passwd)}, // write the password
&expect.BExp{R: `OK`}, // wait for success message
}
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
f66da545-35ff-4298-b40f-af43a0ed6b0d
|
getPasswdSSHValidator
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_password.go
|
func getPasswdSSHValidator(user string) []expect.Batcher {
return []expect.Batcher{
&expect.BExpT{R: "#", T: 120}, // wait for prompt. We wait 120 seconds here, because the debug pod can take some time to be run
&expect.BSnd{S: fmt.Sprintf("chroot /host ssh -o StrictHostKeyChecking=no %[email protected]\n", user)}, // run a ssh login command
&expect.BExp{R: `Permission denied`}, // wait for the login to be rejected because of permission denied
}
}
|
mco
| ||||
function
|
openshift/openshift-tests-private
|
2efc401c-b3be-4bce-b301-0c55295900a8
|
checkAuthorizedKeyInNode
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_password.go
|
func checkAuthorizedKeyInNode(node Node, keys []string) {
logger.Infof("Checking old file /home/core/.ssh/authorized_keys")
rOldAuthorizedFile := NewRemoteFile(node, "/home/core/.ssh/authorized_keys")
o.Expect(rOldAuthorizedFile.Fetch()).ShouldNot(o.Succeed(),
"Old format authorized keys /home/core/.ssh/authorized_keys should not exist in node %s", node.GetName())
// If no key exists and .ssh directory does not exist either, then we have nothing to validate
if len(keys) == 0 {
logger.Infof("No authorized key is configured for node %s. Checking .ssh directory.", node.GetName())
rSSHDir := NewRemoteFile(node, "/home/core/.ssh")
if rSSHDir.Fetch() != nil {
logger.Infof("No authorized key is configured and /home/core/.ssh directory does not exist in node %s", node.GetName())
return
}
}
logger.Infof("Checking /home/core/.ssh")
rSSHDir := NewRemoteFile(node, "/home/core/.ssh")
o.Expect(rSSHDir.Fetch()).To(o.Succeed(), "/home/core/.ssh cannot be found in node %s", node.GetName())
o.Expect(rSSHDir.GetUIDName()).To(o.Equal("core"), "The user owner of /home/core/.ssh should be 'core' user in node %s", node.GetName())
o.Expect(rSSHDir.GetGIDName()).To(o.Equal("core"), "The group owner of /home/core/.ssh should be 'core' group in node %s", node.GetName())
o.Expect(rSSHDir.GetNpermissions()).To(o.Equal("0700"), "Wrong permissions in /home/core/.ssh file in node %s", node.GetName())
logger.Infof("Checking /home/core/.ssh/authorized_keys.d")
rAuthKeysDir := NewRemoteFile(node, "/home/core/.ssh/authorized_keys.d")
o.Expect(rAuthKeysDir.Fetch()).To(o.Succeed(), "/home/core/.ssh/authorized_keys.d cannot be found in node %s", node.GetName())
o.Expect(rAuthKeysDir.GetUIDName()).To(o.Equal("core"), "The user owner of /home/core/.ssh/authorized_keys.d should be 'core' user in node %s", node.GetName())
o.Expect(rAuthKeysDir.GetGIDName()).To(o.Equal("core"), "The group owner of /home/core/.ssh/authorized_keys.d should be 'core' group in node %s", node.GetName())
o.Expect(rAuthKeysDir.GetNpermissions()).To(o.Equal("0700"), "Wrong permissions in /home/core/.ssh/authorized_keys.d directory in node %s", node.GetName())
logger.Infof("Checking /home/core/.ssh/authorized_keys.d/ignition")
rIgnition := NewRemoteFile(node, "/home/core/.ssh/authorized_keys.d/ignition")
o.Expect(rIgnition.Fetch()).To(o.Succeed(), "/home/core/.ssh/authorized_keys.d/ignition cannot be found in node %s", node.GetName())
o.Expect(rIgnition.GetUIDName()).To(o.Equal("core"), "The user owner of /home/core/.ssh/authorized_keys.d/ignition should be 'core' user in node %s", node.GetName())
o.Expect(rIgnition.GetGIDName()).To(o.Equal("core"), "The group owner of /home/core/.ssh/authorized_keys.d/ignition should be 'core' group in node %s", node.GetName())
o.Expect(rIgnition.GetNpermissions()).To(o.Equal("0600"), "Wrong permissions in /home/core/.ssh/authorized_keys.d/ignition file in node %s", node.GetName())
if len(keys) > 0 {
for _, key := range keys {
o.Expect(rIgnition.GetTextContent()).To(o.ContainSubstring(key),
"A expected key does not exist. Wrong content in /home/core/.ssh/authorized_keys.d/ignition file in node %s", node.GetName())
}
} else {
o.Expect(rIgnition.GetTextContent()).To(o.BeEmpty(),
"File should be empty, but it is not. Wrong content in /home/core/.ssh/authorized_keys.d/ignition file in node %s", node.GetName())
}
}
|
mco
| |||||
test case
|
openshift/openshift-tests-private
|
ce4c356c-7562-4a12-8221-ed6b2a31b75d
|
Author:sregidor-NonPreRelease-Longduration-High-59417-[P1][OnCLayer] MCD create/update password with MachineConfig in CoreOS nodes[Disruptive]
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_password.go
|
g.It("Author:sregidor-NonPreRelease-Longduration-High-59417-[P1][OnCLayer] MCD create/update password with MachineConfig in CoreOS nodes[Disruptive]", func() {
var (
mcName = "tc-59417-test-core-passwd"
mcc = NewController(oc.AsAdmin()).IgnoreLogsBeforeNowOrFail()
isOCL = exutil.OrFail[bool](mcp.IsOCL())
)
allCoreos := mcp.GetCoreOsNodesOrFail()
if len(allCoreos) == 0 {
logger.Infof("No CoreOs nodes are configured in the pool %s. We use master pool for testing", mcp.GetName())
mcp = mMcp
allCoreos = mcp.GetCoreOsNodesOrFail()
}
node := allCoreos[0]
startTime := node.GetDateOrFail()
exutil.By("Configure a password for 'core' user")
_, _ = node.GetDate() // for debugging purposes, it prints the node's current time in the logs
o.Expect(node.IgnoreEventsBeforeNow()).NotTo(o.HaveOccurred(),
"Error getting the latest event in node %s", node.GetName())
mc := NewMachineConfig(oc.AsAdmin(), mcName, mcp.GetName())
mc.parameters = []string{fmt.Sprintf(`PWDUSERS=[{"name":"%s", "passwordHash": "%s" }]`, user, passwordHash)}
mc.skipWaitForMcp = true
defer mc.delete()
mc.create()
mcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Check MCD logs to make sure drain and reboot are skipped unless OCL")
checkDrainAction(isOCL, node, mcc)
checkRebootAction(isOCL, node, startTime)
logger.Infof("OK!\n")
if !isOCL {
exutil.By("Check events to make sure that drain and reboot events were not triggered")
nodeEvents, eErr := node.GetEvents()
o.Expect(eErr).ShouldNot(o.HaveOccurred(), "Error getting drain events for node %s", node.GetName())
o.Expect(nodeEvents).NotTo(HaveEventsSequence("Drain"), "Error, a Drain event was triggered but it shouldn't")
o.Expect(nodeEvents).NotTo(HaveEventsSequence("Reboot"), "Error, a Reboot event was triggered but it shouldn't")
logger.Infof("OK!\n")
} else {
logger.Infof("OCL pool, skipping events checks")
}
exutil.By("Verify that user 'core' can login with the configured password")
logger.Infof("verifying node %s", node.GetName())
bresp, err := node.ExecuteDebugExpectBatch(DefaultExpectTimeout, getPasswdValidator(user, password))
o.Expect(err).NotTo(o.HaveOccurred(), "Error in the login process in node %s:\n %s", node.GetName(), bresp)
logger.Infof("OK!\n")
exutil.By("Update the password value")
patchErr := mc.Patch("json",
fmt.Sprintf(`[{ "op": "add", "path": "/spec/config/passwd/users/0/passwordHash", "value": "%s"}]`, updatedPasswdHash))
o.Expect(patchErr).NotTo(o.HaveOccurred(),
"Error patching mc %s to update the 'core' user password")
mcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Verify that user 'core' can login with the new password")
logger.Infof("verifying node %s", node.GetName())
bresp, err = node.ExecuteDebugExpectBatch(DefaultExpectTimeout, getPasswdValidator(user, updatedPassword))
o.Expect(err).NotTo(o.HaveOccurred(), "Error in the login process in node %s:\n %s", node.GetName(), bresp)
logger.Infof("OK!\n")
exutil.By("Remove the password")
mc.deleteNoWait()
mcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Verify that user 'core' can not login using a password anymore")
logger.Infof("verifying node %s", node.GetName())
bresp, err = node.ExecuteDebugExpectBatch(DefaultExpectTimeout, getPasswdValidator(user, updatedPassword))
o.Expect(err).To(o.HaveOccurred(), "User 'core' was able to login using a password in node %s, but it should not be possible:\n %s", node.GetName(), bresp)
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
8e452534-e64c-4f24-8124-df47ed90150d
|
Author:sregidor-NonPreRelease-Longduration-High-60129-[P2][OnCLayer] MCD create/update password with MachineConfig in RHEL nodes[Disruptive]
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_password.go
|
g.It("Author:sregidor-NonPreRelease-Longduration-High-60129-[P2][OnCLayer] MCD create/update password with MachineConfig in RHEL nodes[Disruptive]", func() {
var (
mcName = "tc-60129-test-core-passwd"
)
allRhelNodes := NewNodeList(oc).GetAllRhelWokerNodesOrFail()
if len(allRhelNodes) == 0 {
g.Skip("There are no rhel worker nodes in this cluster")
}
allWorkerNodes := NewNodeList(oc).GetAllLinuxWorkerNodesOrFail()
exutil.By("Create the 'core' user in RHEL nodes")
for _, rhelWorker := range allRhelNodes {
// we copy the loop variable so that the deferred call does not capture the changing iteration value
if !rhelWorker.UserExists(user) {
worker := rhelWorker
defer func() { worker.UserDel(user) }()
o.Expect(worker.UserAdd(user)).NotTo(o.HaveOccurred(),
"Error creating user in node %s", worker.GetName())
} else {
logger.Infof("User %s already exists in node %s. Skip creation.", user, rhelWorker.GetName())
}
}
exutil.By("Configure a password for 'core' user")
mc := NewMachineConfig(oc.AsAdmin(), mcName, MachineConfigPoolWorker)
mc.parameters = []string{fmt.Sprintf(`PWDUSERS=[{"name":"%s", "passwordHash": "%s" }]`, user, passwordHash)}
mc.skipWaitForMcp = true
defer mc.delete()
mc.create()
wMcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Verify that user 'core' can login with the configured password")
for _, workerNode := range allWorkerNodes {
logger.Infof("Verifying node %s", workerNode.GetName())
bresp, err := workerNode.ExecuteDebugExpectBatch(DefaultExpectTimeout, getPasswdValidator(user, password))
o.Expect(err).NotTo(o.HaveOccurred(), "Error in the login process in node %s:\n %s", workerNode.GetName(), bresp)
}
logger.Infof("OK!\n")
exutil.By("Update the password value")
patchErr := mc.Patch("json",
fmt.Sprintf(`[{ "op": "add", "path": "/spec/config/passwd/users/0/passwordHash", "value": "%s"}]`, updatedPasswdHash))
o.Expect(patchErr).NotTo(o.HaveOccurred(),
"Error patching mc %s to update the 'core' user password")
wMcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Verify that user 'core' can login with the new password")
for _, workerNode := range allWorkerNodes {
logger.Infof("Verifying node %s", workerNode.GetName())
bresp, err := workerNode.ExecuteDebugExpectBatch(DefaultExpectTimeout, getPasswdValidator(user, updatedPassword))
o.Expect(err).NotTo(o.HaveOccurred(), "Error in the login process in node %s:\n %s", workerNode.GetName(), bresp)
}
logger.Infof("OK!\n")
exutil.By("Remove the password")
mc.deleteNoWait()
wMcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Verify that user 'core' can not login using a password anymore")
for _, workerNode := range allWorkerNodes {
logger.Infof("Verifying node %s", workerNode.GetName())
bresp, err := workerNode.ExecuteDebugExpectBatch(DefaultExpectTimeout, getPasswdValidator(user, updatedPassword))
o.Expect(err).To(o.HaveOccurred(), "User 'core' was able to login using a password in node %s, but it should not be possible:\n %s", workerNode.GetName(), bresp)
}
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
3144f562-81ae-407f-abce-fd64d2a2fb80
|
Author:sregidor-NonPreRelease-Longduration-Medium-72137-[OnCLayer] Create a password for a user different from 'core' user[Disruptive]
|
['"fmt"', '"regexp"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_password.go
|
g.It("Author:sregidor-NonPreRelease-Longduration-Medium-72137-[OnCLayer] Create a password for a user different from 'core' user[Disruptive]", func() {
var (
mcName = "mco-tc-59900-wrong-user-password"
wrongUser = "root"
passwordHash = "fake-hash"
expectedRDReason = ""
expectedRDMessage = regexp.QuoteMeta(`ignition passwd user section contains unsupported changes: non-core user`)
)
exutil.By("Create a password for a non-core user using a MC")
mc := NewMachineConfig(oc.AsAdmin(), mcName, mcp.GetName())
mc.parameters = []string{fmt.Sprintf(`PWDUSERS=[{"name":"%s", "passwordHash": "%s" }]`, wrongUser, passwordHash)}
mc.skipWaitForMcp = true
validateMcpRenderDegraded(mc, mcp, expectedRDMessage, expectedRDReason)
})
| |||||
test case
|
openshift/openshift-tests-private
|
25b2ec88-c58e-4565-871a-44cf15ca5d94
|
Author:sregidor-NonPreRelease-Longduration-High-59424-[P1][OnCLayer] ssh keys can be found in new dir on RHCOS9 node [Disruptive]
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_password.go
|
g.It("Author:sregidor-NonPreRelease-Longduration-High-59424-[P1][OnCLayer] ssh keys can be found in new dir on RHCOS9 node [Disruptive]", func() {
var (
allCoreOsNodes = wMcp.GetCoreOsNodesOrFail()
allMasters = mMcp.GetNodesOrFail()
)
skipTestIfRHELVersion(allCoreOsNodes[0], "<", "9.0")
exutil.By("Get currently configured authorizedkeys in the cluster")
wMc, err := wMcp.GetConfiguredMachineConfig()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the current configuration for worker pool")
mMc, err := mMcp.GetConfiguredMachineConfig()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the current configuration for master pool")
workerKeys, err := wMc.GetAuthorizedKeysByUserAsList("core")
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the current authorizedkeys for user 'core' in worker pool")
masterKeys, err := mMc.GetAuthorizedKeysByUserAsList("core")
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the current authorizedkeys for user 'core' in master pool")
logger.Infof("Number of AuthorizedKeys configured for worker nodes: %d", len(workerKeys))
logger.Infof("Number of AuthorizedKeys configured for master nodes: %d", len(masterKeys))
logger.Infof("Ok!\n")
exutil.By("Check the authorized key files in the nodes")
logger.Infof("CHECKING AUTHORIZED KEYS FILE IN COREOS WORKER POOL")
for _, worker := range allCoreOsNodes {
logger.Infof("Checking authorized keys file in node:%s", worker.GetName())
checkAuthorizedKeyInNode(worker, workerKeys)
logger.Infof("Ok!\n")
}
logger.Infof("CHECKING AUTHORIZED KEYS FILE IN MASTER POOL")
for _, master := range allMasters {
logger.Infof("Checking authorized keys file in node:%s", master.GetName())
checkAuthorizedKeyInNode(master, masterKeys)
logger.Infof("Ok!\n")
}
})
| ||||||
test case
|
openshift/openshift-tests-private
|
7ecbe2ee-e117-438b-bb15-ecc5767a0b60
|
Author:sregidor-LEVEL0-WRS-NonPreRelease-Longduration-Critical-59426-V-BR.26-[P2][OnCLayer] ssh keys can be updated in new dir on RHCOS9 node[Disruptive]
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_password.go
|
g.It("Author:sregidor-LEVEL0-WRS-NonPreRelease-Longduration-Critical-59426-V-BR.26-[P2][OnCLayer] ssh keys can be updated in new dir on RHCOS9 node[Disruptive]", func() {
var (
mcName = "tc-59426-add-ssh-key"
key1 = `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPmGf/sfIYog1KaHj50H0vaDRITn4Wa8RN9bgc2jj6SejvxhAWZVc4BrRst6BdhGr34IowkZmz76ba9jfa4nGm2HNd+CGqf6KmUhwPjF9oJNjy3z5zT2i903OZii35MUnJl056YXgKYpN96WAD5LVOKop/+7Soxq4PW8TtVZeSpHiPNI28XiIdyqGLzJerhlgPLZBsNO0JcVH1DYLd/c4fh5GDLutszZH/dzAX5RmvN1P/cHie+BnkbgNx91NbrOLTrV5m3nY2End5uGDl8zhaGQ2BX2TmnMqWyxYkYuzNmQFprHMNCCpqLshFGRvCFZGpc6L/72mlpcJubzBF0t5Z [email protected]`
key2 = `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDf7nk9SKloQktDuu0DFDrWv8zRROnxKT04DQdz0RRWXwKyQWFbXi2t7MPkYHb+H7BfuCF8gd3BsfZbGenmRpHrm99bjbZWV6tyyyOWac88RGDXwTeSdcdgZoVDIQfW0S4/y7DP6uo6QGyZEh+s+VTGg8gcqm9L2GkjlA943UWUTyRIVQdex8qbtKdAI0NqYtAzuf1zYDGBob5/BdjT856dF7dDCJG36+d++VRXcyhE+SYxGdEC+OgYwRXjz3+J7XixvTAeY4DdGQOeppjOC/E+0TXh5T0m/+LfCJQCClSYvuxIKPkiMvmNHY4q4lOZUL1/FKIS2pn0P6KsqJ98JvqV [email protected]`
user = ign32PaswdUser{Name: "core", SSHAuthorizedKeys: []string{key1, key2}}
node = mcp.GetCoreOsNodesOrFail()[0]
mcc = NewController(oc.AsAdmin()).IgnoreLogsBeforeNowOrFail()
isOCL = exutil.OrFail[bool](mcp.IsOCL())
)
skipTestIfRHELVersion(node, "<", "9.0")
exutil.By("Get currently configured authorizedkeys in the cluster")
currentMc, err := mcp.GetConfiguredMachineConfig()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the current configuration for %s pool", mcp.GetName())
initialKeys, err := currentMc.GetAuthorizedKeysByUserAsList("core")
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the current authorizedkeys for user 'core' in %s pool", mcp.GetName())
logger.Infof("Number of initially configured AuthorizedKeys: %d", len(initialKeys))
logger.Infof("OK!\n")
exutil.By("Get start time and start collecting events.")
startTime, dErr := node.GetDate()
o.Expect(dErr).ShouldNot(o.HaveOccurred(), "Error getting date in node %s", node.GetName())
o.Expect(node.IgnoreEventsBeforeNow()).NotTo(o.HaveOccurred(),
"Error getting the latest event in node %s", node.GetName())
logger.Infof("OK!\n")
exutil.By("Create a new MC to deploy new authorized keys")
mc := NewMachineConfig(oc.AsAdmin(), mcName, mcp.GetName())
mc.parameters = []string{fmt.Sprintf(`PWDUSERS=[%s]`, MarshalOrFail(user))}
mc.skipWaitForMcp = true
defer mc.delete()
mc.create()
mcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Check that nodes are not drained nor rebooted")
checkDrainAction(isOCL, node, mcc)
logger.Infof("OK!\n")
exutil.By("Verify that the node was NOT rebooted")
checkRebootAction(isOCL, node, startTime)
logger.Infof("OK!\n")
if !isOCL {
exutil.By("Check events to make sure that drain and reboot events were not triggered")
nodeEvents, eErr := node.GetEvents()
o.Expect(eErr).ShouldNot(o.HaveOccurred(), "Error getting drain events for node %s", node.GetName())
o.Expect(nodeEvents).NotTo(HaveEventsSequence("Drain"), "Error, a Drain event was triggered but it shouldn't")
o.Expect(nodeEvents).NotTo(HaveEventsSequence("Reboot"), "Error, a Reboot event was triggered but it shouldn't")
logger.Infof("OK!\n")
}
exutil.By("Check that all expected keys are present")
checkAuthorizedKeyInNode(mcp.GetCoreOsNodesOrFail()[0], append(initialKeys, key1, key2))
logger.Infof("OK!\n")
exutil.By("Delete the MC with the new authorized keys")
mc.delete()
mcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Check that the new authorized keys are removed but the original keys are still present")
checkAuthorizedKeyInNode(node, initialKeys)
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
6639574e-30d8-44eb-9f6b-f9efd0e198fc
|
Author:sregidor-NonPreRelease-Longduration-Medium-62533-[OnCLayer] Passwd login must not work with ssh[Disruptive]
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_password.go
|
g.It("Author:sregidor-NonPreRelease-Longduration-Medium-62533-[OnCLayer] Passwd login must not work with ssh[Disruptive]", func() {
var (
mcName = "tc-62533-test-passwd-ssh-login"
)
allCoreos := mcp.GetCoreOsNodesOrFail()
if len(allCoreos) == 0 {
logger.Infof("No CoreOs nodes are configured in the %s pool. We use master pool for testing", mcp.GetName())
mcp = mMcp
allCoreos = mcp.GetCoreOsNodesOrFail()
}
node := allCoreos[0]
exutil.By("Configure a password for 'core' user")
mc := NewMachineConfig(oc.AsAdmin(), mcName, mcp.GetName())
mc.parameters = []string{fmt.Sprintf(`PWDUSERS=[{"name":"%s", "passwordHash": "%s" }]`, user, passwordHash)}
mc.skipWaitForMcp = true
defer mc.delete()
mc.create()
mcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Check that the password cannot be used to login to the cluster via ssh")
logger.Infof("verifying node %s", node.GetName())
bresp, err := node.ExecuteDebugExpectBatch(DefaultExpectTimeout, getPasswdSSHValidator(user))
o.Expect(err).NotTo(o.HaveOccurred(), "Ssh login should not be allowed in node %s and should report a 'permission denied' error:\n %s", node.GetName(), bresp)
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
eda0e173-81c5-4bb9-b647-4cd671079458
|
Author:sregidor-NonPreRelease-Longduration-Medium-64986-[P1][OnCLayer] Remove all ssh keys [Disruptive]
|
['"path/filepath"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_password.go
|
g.It("Author:sregidor-NonPreRelease-Longduration-Medium-64986-[P1][OnCLayer] Remove all ssh keys [Disruptive]", func() {
var (
sshMCName = "99-" + mcp.GetName() + "-ssh"
backupMCFile = filepath.Join(e2e.TestContext.OutputDir, "tc-64986-"+sshMCName+".backup.json")
)
exutil.By("Get currently configured authorizedkeys in the cluster")
currentMc, err := mcp.GetConfiguredMachineConfig()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the current configuration for %s pool", mcp.GetName())
initialKeys, err := currentMc.GetAuthorizedKeysByUserAsList("core")
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the current authorizedkeys for user 'core' in %s pool", mcp.GetName())
logger.Infof("Number of initially configured AuthorizedKeys: %d", len(initialKeys))
if len(initialKeys) > 1 {
logger.Infof("There is more than 1 ssh key configred in this cluster. Probably they have been added manually.")
g.Skip("There are more than 1 ssh key configured. The cluster has been probably manually modified. Check the configured ssh keys before running this test.")
}
logger.Infof("OK!\n")
exutil.By("Remove the ssh key MachineConfig")
sshMC := NewMachineConfig(oc.AsAdmin(), sshMCName, mcp.GetName())
// If the cluster was created with a ssh key, we remove it and force no sshkey in the cluster
if sshMC.Exists() {
logger.Infof("Save MC information in file: %s", backupMCFile)
o.Expect(sshMC.ExportToFile(backupMCFile)).To(o.Succeed(),
"It was not possible to save MC %s in file %s", sshMC.GetName(), backupMCFile)
defer func() {
logger.Infof("Restore the removed MC")
if !sshMC.Exists() {
OCCreate(oc.AsAdmin(), backupMCFile)
logger.Infof("Wait for MCP to be updated")
mcp.waitForComplete()
}
}()
sshMC.delete()
logger.Infof("OK!\n")
exutil.By("Check that the nodes have the correct configuration for ssh keys. No key configured.")
checkAuthorizedKeyInNode(mcp.GetCoreOsNodesOrFail()[0], []string{})
logger.Infof("OK!\n")
exutil.By("Restore the deleted MC")
o.Expect(OCCreate(oc.AsAdmin(), backupMCFile)).To(o.Succeed(),
"The deleted MC could not be restored")
mcp.waitForComplete()
logger.Infof("OK!\n")
} else {
logger.Infof("MachineConfig %s does not exist. No need to remove it", sshMC.GetName())
}
exutil.By("Check that the nodes have the correct configuration for ssh keys. Original keys.")
checkAuthorizedKeyInNode(mcp.GetCoreOsNodesOrFail()[0], initialKeys)
logger.Infof("OK!\n")
})
| |||||
test case
|
openshift/openshift-tests-private
|
2bef1847-2b0c-4a1f-ad40-5f3986a22740
|
Author:sregidor-NonPreRelease-High-75552-[P2][OnCLayer] apply ssh keys when root owns .ssh [Disruptive]
|
['"fmt"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_password.go
|
g.It("Author:sregidor-NonPreRelease-High-75552-[P2][OnCLayer] apply ssh keys when root owns .ssh [Disruptive]", func() {
var (
node = mcp.GetSortedNodesOrFail()[0]
authKeysdDir = NewRemoteFile(node, "/home/core/.ssh/authorized_keys.d")
sshDir = NewRemoteFile(node, "/home/core/.ssh")
mcName = "tc-75552-ssh"
key = `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDf7nk9SKloQktDuu0DFDrWv8zRROnxKT04DQdz0RRWXwKyQWFbXi2t7MPkYHb+H7BfuCF8gd3BsfZbGenmRpHrm99bjbZWV6tyyyOWac88RGDXwTeSdcdgZoVDIQfW0S4/y7DP6uo6QGyZEh+s+VTGg8gcqm9L2GkjlA943UWUTyRIVQdex8qbtKdAI0NqYtAzuf1zYDGBob5/BdjT856dF7dDCJG36+d++VRXcyhE+SYxGdEC+OgYwRXjz3+J7XixvTAeY4DdGQOeppjOC/E+0TXh5T0m/+LfCJQCClSYvuxIKPkiMvmNHY4q4lOZUL1/FKIS2pn0P6KsqJ98JvqV [email protected]`
user = ign32PaswdUser{Name: "core", SSHAuthorizedKeys: []string{key}}
)
exutil.By("Get currently configured authorizedkeys in the cluster")
currentMc, err := mcp.GetConfiguredMachineConfig()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the current configuration for %s pool", mcp.GetName())
initialKeys, err := currentMc.GetAuthorizedKeysByUserAsList("core")
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the current authorizedkeys for user 'core' in %s pool", mcp.GetName())
logger.Infof("Number of initially configured AuthorizedKeys: %d", len(initialKeys))
logger.Infof("OK!\n")
exutil.By("Remove the authorized keys file from the node")
o.Expect(authKeysdDir.Rm("-rf")).To(o.Succeed(),
"Error removing %s", authKeysdDir)
logger.Infof("OK!\n")
exutil.By("Set root as the owner of the .ssh directory")
o.Expect(sshDir.PushNewOwner("root:root")).To(o.Succeed(),
"Error setting root owner in %s", sshDir)
logger.Infof("OK!\n")
// For debugging purposes
s, _ := node.DebugNodeWithChroot("ls", "-larth", "/home/core/.ssh")
logger.Infof("list /home/core/.ssh: \n %s", s)
exutil.By("Create a new MC to deploy new authorized keys")
mc := NewMachineConfig(oc.AsAdmin(), mcName, mcp.GetName())
mc.parameters = []string{fmt.Sprintf(`PWDUSERS=[%s]`, MarshalOrFail(user))}
mc.skipWaitForMcp = true
defer mc.delete()
mc.create()
mcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Check that all expected keys are present and with the right permissions and owners")
// This function checks the owners and the permissions in the .ssh and authorized_keys.d directories
checkAuthorizedKeyInNode(mcp.GetCoreOsNodesOrFail()[0], append(initialKeys, key))
logger.Infof("OK!\n")
})
| |||||
test
|
openshift/openshift-tests-private
|
265e7b59-436d-43a3-9fe5-bb1f199b1d8c
|
mco_pinnedimages
|
import (
"fmt"
"math"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
)
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_pinnedimages.go
|
package mco
import (
"fmt"
"math"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
)
var _ = g.Describe("[sig-mco] MCO Pinnedimages", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("mco-pinnedimages", exutil.KubeConfigPath())
wMcp *MachineConfigPool
mMcp *MachineConfigPool
// Compact compatible MCP. If the node is compact/SNO this variable will be the master pool, else it will be the worker pool
mcp *MachineConfigPool
)
g.JustBeforeEach(func() {
// The pinnedimageset feature is currently only supported in techpreview
skipIfNoTechPreview(oc.AsAdmin())
wMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)
mMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)
mcp = GetCompactCompatiblePool(oc.AsAdmin())
logger.Infof("%s %s %s", wMcp, mMcp, mcp)
preChecks(oc)
})
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-73659-[P1][OnCLayer] Pinned images when disk-pressure [Disruptive]", func() {
var (
waitForPinned = time.Minute * 5
pinnedImageSetName = "tc-73659-pin-images-disk-pressure"
pinnedImageName = BusyBoxImage
allNodes = mcp.GetNodesOrFail()
node = allNodes[0]
cleanFileTimedService = generateTemplateAbsolutePath("tc-73659-clean-file-timed.service")
cleanFileTimedServiceDestinationPath = "/etc/systemd/system/tc-73659-clean-file-timed.service"
)
exutil.By("Get disk usage in node")
diskUsage, err := node.GetFileSystemSpaceUsage("/var/lib/containers/storage/")
o.Expect(err).NotTo(o.HaveOccurred(),
"Cannot get the disk usage in node %s", node.GetName())
logger.Infof("OK!\n")
exutil.By("Create a timed service that will restore the original disk usage after 5 minutes")
logger.Infof("Copy the service in the node")
defer node.DebugNodeWithChroot("rm", cleanFileTimedServiceDestinationPath)
o.Expect(node.CopyFromLocal(cleanFileTimedService, cleanFileTimedServiceDestinationPath)).
NotTo(o.HaveOccurred(),
"Error copying %s to %s in node %s", cleanFileTimedService, cleanFileTimedServiceDestinationPath, node.GetName())
// We create a transient timer that will execute the service; the service will restore the disk usage to its original value
logger.Infof("Create a transient timer to execute the service after 5 minutes")
// If an error happens, the transient timer will not be deleted unless we execute this command
defer node.DebugNodeWithChroot("systemctl", "reset-failed", "tc-73659-clean-file-timed.service")
defer node.DebugNodeWithChroot("systemctl", "stop", "tc-73659-clean-file-timed.service")
_, err = node.DebugNodeWithChroot("systemd-run", `--on-active=5minutes`, `--unit=tc-73659-clean-file-timed.service`)
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating the transient timer")
logger.Infof("OK!\n")
exutil.By("Create a huge file so that the node reports disk pressure. Use about 90 per cent of the free space in the disk")
fileSize := ((diskUsage.Avail + diskUsage.Used) * 9 / 10) - diskUsage.Used // calculate the file size so that 90% of the disk space is used
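// Illustration with hypothetical numbers: if Avail=100GiB and Used=20GiB, 90% of the 120GiB filesystem
// is 108GiB, so fileSize = 108GiB - 20GiB = 88GiB, which still fits within the 100GiB available and
// therefore passes the sanity check below.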
o.Expect(fileSize).To(o.And(
o.BeNumerically("<", diskUsage.Avail),
o.BeNumerically(">", 0)),
"Error not enough space on device to execute this test. Available: %d, Used %d", diskUsage.Avail, diskUsage.Used)
_, err = node.DebugNodeWithChroot("fallocate", "-l", fmt.Sprintf("%d", fileSize), "/var/lib/containers/storage/tc-73659-huge-test-file.file")
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating a file to trigger disk pressure")
logger.Infof("OK!\n")
exutil.By("Wait for disk pressure to be reported")
// It makes no sense to wait longer than the 5 minute timeout that we use to fix the disk usage.
// If we need to increase this timeout, we need to increase the transient timer too
o.Eventually(&node, "5m", "20s").Should(HaveConditionField("DiskPressure", "status", TrueString),
"Node is not reporting DiskPressure, but it should.\n%s", node.PrettyString())
logger.Infof("OK!\n")
exutil.By("Pin images")
pis, err := CreateGenericPinnedImageSet(oc.AsAdmin(), pinnedImageSetName, mcp.GetName(), []string{pinnedImageName})
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating pinnedimageset %s", pis)
defer pis.DeleteAndWait(waitForPinned)
logger.Infof("OK!\n")
exutil.By("Check the degraded status")
logger.Infof("Check that the node with disk pressure is reporting pinnedimagesetdegraded status")
mcn := node.GetMachineConfigNode()
o.Eventually(mcn, "2m", "20s").Should(HaveConditionField("PinnedImageSetsDegraded", "status", TrueString),
"MachineConfigNode was not degraded.\n%s\n%s", mcn.PrettyString(), node.PrettyString())
o.Eventually(mcn, "2m", "20s").Should(HaveConditionField("PinnedImageSetsDegraded", "reason", "PrefetchFailed"),
"MachineConfigNode was not degraded with the expected reason.\n%s\n%s", mcn.PrettyString(), node.PrettyString())
o.Eventually(mcn, "2m", "20s").Should(HaveConditionField("PinnedImageSetsDegraded", "message", `node `+node.GetName()+` is reporting OutOfDisk=True`),
"MachineConfigNode was not degraded with the expected message.\n%s\n%s", mcn.PrettyString(), node.PrettyString())
logger.Infof("Check that the rest of the nodes could pin the image and are not degraded")
for _, n := range allNodes {
if n.GetName() != node.GetName() {
logger.Infof("Checking node %s", n.GetName())
o.Eventually(n.GetMachineConfigNode, "2m", "20s").Should(HaveConditionField("PinnedImageSetsDegraded", "status", FalseString),
"MachineConfigNode was degraded.\n%s\n%s", node.GetMachineConfigNode().PrettyString(), node.PrettyString())
rmi := NewRemoteImage(n, pinnedImageName)
o.Eventually(rmi.IsPinned, "5m", "20s").Should(o.BeTrue(), "%s should be pinned but it is not", rmi)
}
}
logger.Infof("OK!\n")
exutil.By("Wait for disk pressure to be fixed") // It should be fixed by the timed service that was created before
o.Eventually(&node, "20m", "20s").Should(HaveConditionField("DiskPressure", "status", FalseString),
"Node is reporting DiskPressure, but it should not.\n%s", node.PrettyString())
logger.Infof("OK!\n")
exutil.By("Check that the degraded status was fixed")
o.Eventually(mcn, "2m", "20s").Should(HaveConditionField("PinnedImageSetsDegraded", "status", FalseString),
"MachineConfigNode was not degraded.\n%s\n%s", mcn.PrettyString(), node.PrettyString())
o.Eventually(NewRemoteImage(node, pinnedImageName).IsPinned, "2m", "20s").Should(o.BeTrue(),
"The degraded status was fixed, but the image was not pinned")
logger.Infof("OK!\n")
})
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-High-73623-[P2][OnCLayer] Pin images [Disruptive]", func() {
var (
waitForPinned = time.Minute * 15
pinnedImageSetName = "tc-73623-pin-images"
node = mcp.GetNodesOrFail()[0]
firstPinnedImage = NewRemoteImage(node, BusyBoxImage)
secondPinnedImage = NewRemoteImage(node, AlpineImage)
)
exutil.By("Remove images")
_ = firstPinnedImage.Rmi()
_ = secondPinnedImage.Rmi()
logger.Infof("OK!\n")
exutil.By("Pin images")
pis, err := CreateGenericPinnedImageSet(oc.AsAdmin(), pinnedImageSetName, mcp.GetName(), []string{firstPinnedImage.ImageName, secondPinnedImage.ImageName})
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating pinnedimageset %s", pis)
defer pis.DeleteAndWait(waitForPinned)
logger.Infof("OK!\n")
exutil.By("Wait for all images to be pinned")
o.Expect(mcp.waitForPinComplete(waitForPinned)).To(o.Succeed(), "Pinned image operation is not completed in %s", mcp)
logger.Infof("OK!\n")
exutil.By("Check that the images are pinned")
o.Expect(firstPinnedImage.IsPinned()).To(o.BeTrue(), "%s is not pinned, but it should", firstPinnedImage)
o.Expect(secondPinnedImage.IsPinned()).To(o.BeTrue(), "%s is not pinned, but it should", secondPinnedImage)
logger.Infof("OK!\n")
exutil.By("Patch the pinnedimageset and remove one image")
o.Expect(
pis.Patch("json", fmt.Sprintf(`[{"op": "replace", "path": "/spec/pinnedImages", "value": [{"name": "%s"}]}]`, firstPinnedImage.ImageName)),
).To(o.Succeed(),
"Error patching %s to remove one image")
logger.Infof("OK!\n")
exutil.By("Wait for the pinnedimageset changes to be applied")
o.Expect(mcp.waitForPinComplete(waitForPinned)).To(o.Succeed(), "Pinned image operation is not completed in %s", mcp)
logger.Infof("OK!\n")
exutil.By("Check that only the image reamaining in the pinnedimageset is pinned")
o.Expect(firstPinnedImage.IsPinned()).To(o.BeTrue(), "%s is not pinned, but it should", firstPinnedImage)
o.Expect(secondPinnedImage.IsPinned()).To(o.BeFalse(), "%s is pinned, but it should NOT", secondPinnedImage)
logger.Infof("OK!\n")
exutil.By("Remove the pinnedimageset")
o.Expect(pis.Delete()).To(o.Succeed(), "Error removing %s", pis)
logger.Infof("OK!\n")
exutil.By("Wait for the pinnedimageset removal to be applied")
o.Expect(mcp.waitForPinComplete(waitForPinned)).To(o.Succeed(), "Pinned image operation is not completed in %s", mcp)
logger.Infof("OK!\n")
exutil.By("Check that only the image reamaining in the pinnedimageset is pinned")
o.Expect(firstPinnedImage.IsPinned()).To(o.BeFalse(), "%s is pinned, but it should NOT", firstPinnedImage)
o.Expect(secondPinnedImage.IsPinned()).To(o.BeFalse(), "%s is pinned, but it should NOT", secondPinnedImage)
logger.Infof("OK!\n")
})
// Disconnected clusters use an imagecontentsourcepolicy to mirror the openshifttest images. In these test cases we create an ImageDigestMirrorSet to mirror the same images, which is not supported
// Hence we skip this test case in disconnected clusters
g.It("Author:sregidor-ConnectedOnly-NonHyperShiftHOST-NonPreRelease-Longduration-High-73653-[OnCLayer] Pinned images with a ImageDigestMirrorSet mirroring a single repository [Disruptive]", func() {
var (
idmsName = "tc-73653-mirror-single-repository"
idmsMirrors = `[{"mirrors":["quay.io/openshifttest/busybox"], "source": "example-repo.io/digest-example/mybusy", "mirrorSourcePolicy":"NeverContactSource"}]`
// actually quay.io/openshifttest/busybox@sha256:c5439d7db88ab5423999530349d327b04279ad3161d7596d2126dfb5b02bfd1f but using our configured mirror instead
pinnedImage = strings.Replace(BusyBoxImage, "quay.io/openshifttest/busybox", "example-repo.io/digest-example/mybusy", 1)
pinnedImageSetName = "tc-73653-mirror-single-repository"
)
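// For illustration, with the mirror above a pull of example-repo.io/digest-example/mybusy@sha256:<digest>
// is served from quay.io/openshifttest/busybox@sha256:<digest>, and NeverContactSource means the
// (non-existent) source repository is never contacted as a fallback.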
DigestMirrorTest(oc, mcp, idmsName, idmsMirrors, pinnedImage, pinnedImageSetName)
})
// Disconnected clusters use an imagecontentsourcepolicy to mirror the openshifttest images. In these test cases we create an ImageDigestMirrorSet to mirror the same images, which is not supported
// Hence we skip this test case in disconnected clusters
g.It("Author:sregidor-ConnectedOnly-NonHyperShiftHOST-NonPreRelease-Longduration-High-73657-[P1][OnCLayer] Pinned images with a ImageDigestMirrorSet mirroring a domain [Disruptive]", func() {
var (
idmsName = "tc-73657-mirror-domain"
idmsMirrors = `[{"mirrors":["quay.io:443"], "source": "example-domain.io:443", "mirrorSourcePolicy":"NeverContactSource"}]`
// actually quay.io/openshifttest/busybox@sha256:c5439d7db88ab5423999530349d327b04279ad3161d7596d2126dfb5b02bfd1f but using our configured mirror instead
pinnedImage = strings.Replace(BusyBoxImage, "quay.io", "example-domain.io:443", 1)
pinnedImageSetName = "tc-73657-mirror-domain"
)
DigestMirrorTest(oc, mcp, idmsName, idmsMirrors, pinnedImage, pinnedImageSetName)
})
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Medium-73361-[P2][OnCLayer] Pinnedimageset invalid pinned images [Disruptive]", func() {
var (
invalidPinnedImage = "quay.io/openshiftfake/fakeimage@sha256:0415f56ccc05526f2af5a7ae8654baec97d4a614f24736e8eef41a4591f08019"
pinnedImageSetName = "tc-73361-invalid-pinned-image"
waitForPinned = 10 * time.Minute
)
exutil.By("Pin invalid image")
pis, err := CreateGenericPinnedImageSet(oc.AsAdmin(), pinnedImageSetName, mcp.GetName(), []string{invalidPinnedImage})
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating pinnedimageset %s", pis)
defer pis.DeleteAndWait(waitForPinned)
logger.Infof("OK!\n")
exutil.By("Check that MCNs are PinnedImageSetDegraded")
for _, node := range mcp.GetNodesOrFail() {
mcn := node.GetMachineConfigNode()
o.Eventually(mcn, "2m", "20s").Should(HaveConditionField("PinnedImageSetsDegraded", "status", TrueString))
o.Eventually(mcn, "2m", "20s").Should(HaveConditionField("PinnedImageSetsDegraded", "reason", "PrefetchFailed"))
}
logger.Infof("OK!\n")
exutil.By("Remove the pinnedimageset")
o.Expect(pis.Delete()).To(o.Succeed(), "Error removing %s", pis)
logger.Infof("OK!\n")
exutil.By("Check that MCNs are not PinnedImageSetDegraded anymore")
for _, node := range mcp.GetNodesOrFail() {
mcn := node.GetMachineConfigNode()
o.Eventually(mcn, "2m", "20s").Should(HaveConditionField("PinnedImageSetsDegraded", "status", FalseString))
}
logger.Infof("OK!\n")
})
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-73631-[OnCLayer] Pinned images garbage collection [Disruptive]", func() {
var (
waitForPinned = time.Minute * 5
pinnedImageSetName = "tc-73631-pinned-images-garbage-collector"
gcKubeletConfig = `{"imageMinimumGCAge": "0s", "imageGCHighThresholdPercent": 2, "imageGCLowThresholdPercent": 1}`
kcTemplate = generateTemplateAbsolutePath("generic-kubelet-config.yaml")
kcName = "tc-73631-pinned-garbage-collector"
node = mcp.GetNodesOrFail()[0]
startTime = node.GetDateOrFail()
pinnedImage = NewRemoteImage(node, BusyBoxImage)
manuallyPulledImage = NewRemoteImage(node, AlpineImage)
)
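// A note on gcKubeletConfig above: imageMinimumGCAge=0s makes unused images immediately eligible for
// garbage collection, and the very low imageGCHighThresholdPercent=2 / imageGCLowThresholdPercent=1
// values make kubelet reclaim image storage as soon as disk usage goes above 2%, so the manually
// pulled image should be removed while the pinned image must survive.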
exutil.By("Remove the test images")
_ = pinnedImage.Rmi()
_ = manuallyPulledImage.Rmi()
logger.Infof("OK!\n")
exutil.By("Configure kubelet to start garbage collection")
logger.Infof("Create worker KubeletConfig")
kc := NewKubeletConfig(oc.AsAdmin(), kcName, kcTemplate)
defer mcp.waitForComplete()
defer kc.Delete()
kc.create("KUBELETCONFIG="+gcKubeletConfig, "POOL="+mcp.GetName())
exutil.By("Wait for configurations to be applied in worker pool")
mcp.waitForComplete()
logger.Infof("OK!\n")
logger.Infof("Pin image")
pis, err := CreateGenericPinnedImageSet(oc.AsAdmin(), pinnedImageSetName, mcp.GetName(), []string{pinnedImage.ImageName})
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating pinnedimageset %s", pis)
defer pis.DeleteAndWait(waitForPinned)
logger.Infof("OK!\n")
exutil.By("Wait for all images to be pinned")
o.Expect(mcp.waitForPinComplete(waitForPinned)).To(o.Succeed(), "Pinned image operation is not completed in %s", mcp)
logger.Infof("OK!\n")
exutil.By("Manually pull image")
o.Expect(manuallyPulledImage.Pull()).To(o.Succeed(),
"Error pulling %s", manuallyPulledImage)
logger.Infof("Check that the manually pulled image is not pinned")
o.Expect(manuallyPulledImage.IsPinned()).To(o.BeFalse(),
"Error, %s is pinned, but it should not", manuallyPulledImage)
logger.Infof("OK!\n")
exutil.By("Check that the manually pulled image is garbage collected")
o.Eventually(manuallyPulledImage, "25m", "20s").ShouldNot(Exist(),
"Error, %s has not been garbage collected", manuallyPulledImage)
logger.Infof("OK!\n")
exutil.By("Check that the pinned image is still pinned after garbage collection")
o.Eventually(pinnedImage.IsPinned, "2m", "10s").Should(o.BeTrue(),
"Error, after the garbage collection happened %s is not pinned anymore", pinnedImage)
logger.Infof("OK!\n")
exutil.By("Reboot node")
o.Expect(node.Reboot()).To(o.Succeed(),
"Error rebooting node %s", node.GetName())
o.Eventually(node.GetUptime, "15m", "30s").Should(o.BeTemporally(">", startTime),
"%s was not properly rebooted", node)
logger.Infof("OK!\n")
exutil.By("Check that the pinned image is still pinned after reboot")
o.Expect(pinnedImage.IsPinned()).To(o.BeTrue(),
"Error, after the garbage collection happened %s is not pinned anymore", pinnedImage)
logger.Infof("OK!\n")
})
g.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-73635-[P1][OnCLayer] Pod can use pinned images while no access to the registry [Disruptive]", func() {
var (
waitForPinned = time.Minute * 5
pinnedImageSetName = "tc-73635-pinned-images-no-registry"
// We pin the current release's tools image
// if we cannot get the "tools" image it means we are in a disconnected cluster
// and in disconnected clusters openshifttest images are mirrored and need the credentials for the mirror too
// so if we cannot get the "tools" image we can use the "busybox" one.
pinnedImage = getCurrentReleaseInfoImageSpecOrDefault(oc.AsAdmin(), "tools", BusyBoxImage)
allNodes = mcp.GetNodesOrFail()
pullSecret = GetPullSecret(oc.AsAdmin())
deploymentName = "tc-73635-test"
deploymentNamespace = oc.Namespace()
deployment = NewNamespacedResource(oc, "deployment", deploymentNamespace, deploymentName)
scaledReplicas = 5
nodeList = NewNamespacedResourceList(oc, "pod", deploymentNamespace)
)
defer nodeList.PrintDebugCommand() // for debugging purposes in case of a failed deployment
exutil.By("Remove the image from all nodes in the pool")
for _, node := range allNodes {
// We ignore errors, since the image can be present or not in the nodes
_ = NewRemoteImage(node, pinnedImage).Rmi()
}
logger.Infof("OK!\n")
exutil.By("Create pinnedimageset")
pis, err := CreateGenericPinnedImageSet(oc.AsAdmin(), pinnedImageSetName, mcp.GetName(), []string{pinnedImage})
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating pinnedimageset %s", pis)
defer pis.DeleteAndWait(waitForPinned)
logger.Infof("OK!\n")
exutil.By("Wait for all images to be pinned")
o.Expect(mcp.waitForPinComplete(waitForPinned)).To(o.Succeed(), "Pinned image operation is not completed in %s", mcp)
logger.Infof("OK!\n")
exutil.By("Check that the image was pinned in all nodes in the pool")
for _, node := range allNodes {
ri := NewRemoteImage(node, pinnedImage)
logger.Infof("Checking %s", ri)
o.Expect(ri.IsPinned()).To(o.BeTrue(),
"%s is not pinned, but it should. %s")
}
logger.Infof("OK!\n")
exutil.By("Capture the current pull-secret value")
// We don't use the pullSecret resource directly, instead we use auxiliary functions that will
// extract and restore the secret's values using a file. Like that we can recover the value of the pull-secret
// if our execution goes wrong, without printing it in the logs (for security reasons).
secretFile, err := getPullSecret(oc)
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the pull-secret")
logger.Debugf("Pull-secret content stored in file %s", secretFile)
defer func() {
logger.Infof("Restoring initial pull-secret value")
output, err := setDataForPullSecret(oc, secretFile)
if err != nil {
logger.Errorf("Error restoring the pull-secret's value. Error: %s\nOutput: %s", err, output)
}
wMcp.waitForComplete()
mMcp.waitForComplete()
}()
logger.Infof("OK!\n")
exutil.By("Set an empty pull-secret")
o.Expect(pullSecret.SetDataValue(".dockerconfigjson", "{}")).To(o.Succeed(),
"Error setting an empty pull-secret value")
mcp.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Check that the image is pinned")
for _, node := range allNodes {
logger.Infof("Checking node %s", node.GetName())
ri := NewRemoteImage(node, pinnedImage)
o.Expect(ri.IsPinned()).To(o.BeTrue(),
"%s is not pinned, but it should. %s")
}
logger.Infof("OK!\n")
exutil.By("Create test deployment")
defer deployment.Delete()
o.Expect(
NewMCOTemplate(oc.AsAdmin(), "create-deployment.yaml").Create("-p", "NAME="+deploymentName, "IMAGE="+pinnedImage, "NAMESPACE="+deploymentNamespace),
).To(o.Succeed(),
"Error creating the deployment")
o.Eventually(deployment, "6m", "15s").Should(BeAvailable(),
"Resource is NOT available:\n/%s", deployment.PrettyString())
o.Eventually(deployment.Get, "6m", "15s").WithArguments(`{.status.readyReplicas}`).Should(o.Equal(deployment.GetOrFail(`{.spec.replicas}`)),
"Resource is NOT stable, still creating replicas:\n/%s", deployment.PrettyString())
logger.Infof("OK!\n")
exutil.By("Scale app")
o.Expect(
deployment.Patch("merge", fmt.Sprintf(`{"spec":{"replicas":%d}}`, scaledReplicas)),
).To(o.Succeed(),
"Error scaling %s", deployment)
o.Eventually(deployment, "6m", "15s").Should(BeAvailable(),
"Resource is NOT available:\n/%s", deployment.PrettyString())
o.Eventually(deployment.Get, "6m", "15s").WithArguments(`{.status.readyReplicas}`).Should(o.Equal(deployment.GetOrFail(`{.spec.replicas}`)),
"Resource is NOT stable, still creating replicas:\n/%s", deployment.PrettyString())
logger.Infof("OK!\n")
exutil.By("Reboot nodes")
for _, node := range allNodes {
o.Expect(node.Reboot()).To(o.Succeed(), "Error rebooting node %s", node)
}
for _, node := range allNodes {
_, err := node.DebugNodeWithChroot("hostname")
o.Expect(err).NotTo(o.HaveOccurred(), "Node %s was not recovered after rebot", node)
}
logger.Infof("OK!\n")
exutil.By("Check that the applicaion is OK after the reboot")
o.Eventually(deployment, "6m", "15s").Should(BeAvailable(),
"Resource is NOT available:\n/%s", deployment.PrettyString())
o.Eventually(deployment.Get, "6m", "15s").WithArguments(`{.status.readyReplicas}`).Should(o.Equal(deployment.GetOrFail(`{.spec.replicas}`)),
"Resource is NOT stable, still creating replicas:\n/%s", deployment.PrettyString())
logger.Infof("OK!\n")
})
g.It("Author:sregidor-NonHyperShiftHOST-ConnectedOnly-NonPreRelease-Longduration-Medium-73630-[P2][OnCLayer] Pin release images [Disruptive]", func() {
var (
waitForPinned = time.Minute * 30
pinnedImageSetName = "tc-73630-pinned-imageset-release"
pinnedImages = RemoveDuplicates(getReleaseInfoPullspecOrFail(oc.AsAdmin()))
node = mcp.GetNodesOrFail()[0]
minGigasAvailableInNodes = 40
)
skipIfDiskSpaceLessThanBytes(node, "/var/lib/containers/storage/", int64(float64(minGigasAvailableInNodes)*(math.Pow(1024, 3))))
exutil.By("Create pinnedimageset to pin all pullSpec images")
pis, err := CreateGenericPinnedImageSet(oc.AsAdmin(), pinnedImageSetName, mcp.GetName(), pinnedImages)
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating pinnedimageset %s", pis)
defer pis.DeleteAndWait(waitForPinned)
logger.Infof("OK!\n")
exutil.By("Wait for all images to be pinned")
o.Expect(mcp.waitForPinComplete(waitForPinned)).To(o.Succeed(), "Pinned image operation is not completed in %s", mcp)
logger.Infof("OK!\n")
exutil.By("Check that all images were pinned")
for _, image := range pinnedImages {
ri := NewRemoteImage(node, image)
o.Expect(ri.IsPinned()).To(o.BeTrue(),
"%s is not pinned, but it should", ri)
}
logger.Infof("OK!\n")
})
g.It("Author:sserafin-NonHyperShiftHOST-NonPreRelease-Longduration-High-73648-[OnCLayer] A rebooted node reconciles with the pinned images status [Disruptive]", func() {
var (
waitForPinned = time.Minute * 5
pinnedImageSetName = "tc-73648-pinned-image"
pinnedImage = BusyBoxImage
allMasters = mMcp.GetNodesOrFail()
pullSecret = GetPullSecret(oc.AsAdmin())
)
exutil.By("Create pinnedimageset")
pis, err := CreateGenericPinnedImageSet(oc.AsAdmin(), pinnedImageSetName, mMcp.GetName(), []string{pinnedImage})
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating pinnedimageset %s", pis)
defer pis.DeleteAndWait(waitForPinned)
logger.Infof("OK!\n")
exutil.By("Wait for all images to be pinned")
o.Expect(mMcp.waitForPinComplete(waitForPinned)).To(o.Succeed(), "Pinned image operation is not completed in %s", mMcp)
logger.Infof("OK!\n")
exutil.By("Check that the image was pinned in all nodes in the pool")
for _, node := range allMasters {
ri := NewRemoteImage(node, pinnedImage)
logger.Infof("Checking %s", ri)
o.Expect(ri.IsPinned()).To(o.BeTrue(),
"%s is not pinned, but it should", ri)
}
logger.Infof("OK!\n")
exutil.By("Capture the current pull-secret value")
secretFile, err := getPullSecret(oc)
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the pull-secret")
logger.Debugf("Pull-secret content stored in file %s", secretFile)
defer func() {
logger.Infof("Restoring initial pull-secret value")
output, err := setDataForPullSecret(oc, secretFile)
if err != nil {
logger.Errorf("Error restoring the pull-secret's value. Error: %v\nOutput: %s", err, output)
}
wMcp.waitForComplete()
mMcp.waitForComplete()
}()
logger.Infof("OK!\n")
exutil.By("Set an empty pull-secret")
o.Expect(pullSecret.SetDataValue(".dockerconfigjson", "{}")).To(o.Succeed(),
"Error setting an empty pull-secret value")
mMcp.waitForComplete()
wMcp.waitForComplete()
logger.Infof("OK!\n")
// find the node with the machine-config-controller
exutil.By("Get the mcc node")
var mcc = NewController(oc.AsAdmin())
mccMaster, err := mcc.GetNode()
o.Expect(err).NotTo(o.HaveOccurred(), "Cannot get the node where the MCO controller is running")
logger.Infof("OK!\n")
// reboot the node with mcc
exutil.By("Reboot node")
startTime := mccMaster.GetDateOrFail()
o.Expect(mccMaster.Reboot()).To(o.Succeed(), "Error rebooting node %s", mccMaster)
logger.Infof("OK!\n")
// delete the pinnedImageSet
exutil.By("Delete the pinnedimageset")
o.Eventually(pis.Delete, "13m", "20s").ShouldNot(o.HaveOccurred(), "Error deleting pinnedimageset %s", pis)
logger.Infof("OK!\n")
// wait for the rebooted node
exutil.By("Wait for the rebooted node")
o.Eventually(mccMaster.GetUptime, "15m", "30s").Should(o.BeTemporally(">", startTime),
"%s was not properly rebooted", mccMaster)
mMcp.waitForComplete()
o.Expect(mMcp.waitForPinComplete(waitForPinned)).To(o.Succeed(), "Pinned image operation is not completed in %s", mMcp)
logger.Infof("OK!\n")
// check pinned imageset is deleted in all nodes in the pool
exutil.By("Check that the images are not pinned in all nodes in the pool")
for _, node := range allMasters {
ri := NewRemoteImage(node, pinnedImage)
logger.Infof("Checking %s", ri)
o.Eventually(ri.IsPinned, "5m", "20s").Should(o.BeFalse(),
"%s is pinned, but it should not", ri)
}
logger.Infof("OK!\n")
})
})
// getReleaseInfoPullspecOrFail returns a list of strings containing the names of the pullspec images
func getReleaseInfoPullspecOrFail(oc *exutil.CLI) []string {
mMcp := NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)
master := mMcp.GetNodesOrFail()[0]
remoteAdminKubeConfig := fmt.Sprintf("/root/remoteKubeConfig-%s", exutil.GetRandomString())
adminKubeConfig := exutil.KubeConfigPath()
defer master.RemoveFile(remoteAdminKubeConfig)
o.Expect(master.CopyFromLocal(adminKubeConfig, remoteAdminKubeConfig)).To(o.Succeed(),
"Error copying kubeconfig file to master node")
releaseInfoCommand := fmt.Sprintf("oc adm release info -o pullspec --registry-config /var/lib/kubelet/config.json --kubeconfig %s", remoteAdminKubeConfig)
stdout, _, err := master.DebugNodeWithChrootStd("sh", "-c", "set -a; source /etc/mco/proxy.env; "+releaseInfoCommand)
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting image release pull specs")
return strings.Split(stdout, "\n")
}
// skipIfDiskSpaceLessThanBytes skip test case if there is less than minNumBytes space available in the given path
func skipIfDiskSpaceLessThanBytes(node Node, path string, minNumBytes int64) {
diskUsage, err := node.GetFileSystemSpaceUsage(path)
o.Expect(err).NotTo(o.HaveOccurred(),
"Cannot get the disk usage in node %s", node.GetName())
if minNumBytes > diskUsage.Avail {
g.Skip(fmt.Sprintf("Available disk space in %s is %d bytes, which is less than the required %d bytes",
node.GetName(), diskUsage.Avail, minNumBytes))
}
logger.Infof("Required disk space %d bytes, available disk space %d", minNumBytes, diskUsage.Avail)
}
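// Example call (mirroring the existing usage in the 73630 release-image test): require at least
// 40 GiB free in the node's container storage path before running the test, otherwise skip it.
//
//	skipIfDiskSpaceLessThanBytes(node, "/var/lib/containers/storage/", int64(40*math.Pow(1024, 3)))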
// getCurrentReleaseInfoImageSpecOrDefault returns the image spec for the given image in the release. If there is any error, it returns the given default image.
// In disconnected clusters the release image is mirrored. Unfortunately, the "oc adm release info" command does not take /etc/containers/registries.conf mirrors into account,
// hence in disconnected clusters we cannot get the release image specs unless we apply the mirror manually.
// TODO: When the "oc adm release info" command fails:
// 1. parse the output to get the release image name
// 2. search all imagecontentsourcepolicies and all imagedigestmirrorsets for a mirror of the release image (there should be one)
// 3. use that mirror manually to get the image specs
// A rough, untested sketch of this approach is included after getCurrentReleaseInfoImageSpec below.
func getCurrentReleaseInfoImageSpecOrDefault(oc *exutil.CLI, imageName, defaultImageName string) string {
image, err := getCurrentReleaseInfoImageSpec(oc, imageName)
if err != nil {
return defaultImageName
}
return image
}
// getCurrentReleaseInfoImageSpec returns the image spec for the given image in the release
func getCurrentReleaseInfoImageSpec(oc *exutil.CLI, imageName string) (string, error) {
mMcp := NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)
allNodes, err := mMcp.GetNodes()
if err != nil {
return "", err
}
master := allNodes[0]
remoteAdminKubeConfig := fmt.Sprintf("/root/remoteKubeConfig-%s", exutil.GetRandomString())
adminKubeConfig := exutil.KubeConfigPath()
defer master.RemoveFile(remoteAdminKubeConfig)
err = master.CopyFromLocal(adminKubeConfig, remoteAdminKubeConfig)
if err != nil {
return "", err
}
stdout, _, err := master.DebugNodeWithChrootStd("oc", "adm", "release", "info", "--image-for", imageName, "--registry-config", "/var/lib/kubelet/config.json", "--kubeconfig", remoteAdminKubeConfig)
if err != nil {
return "", err
}
return stdout, nil
}
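// getReleaseImageMirrorOrDefault is a hedged, untested sketch of the TODO above and is not called by
// any test yet: when "oc adm release info" cannot resolve the release image in a disconnected cluster,
// look for a mirror of the release image in the ImageDigestMirrorSet resources and rewrite the image
// spec to use that mirror. The jsonpath query and the simplified mirror parsing below are assumptions
// about the IDMS schema and output format; validate them before relying on this helper.
func getReleaseImageMirrorOrDefault(oc *exutil.CLI, releaseImage, defaultImage string) string {
	// List every ImageDigestMirrorSet entry as lines of "<source> <mirrors>".
	out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("imagedigestmirrorset", "-o",
		`jsonpath={range .items[*].spec.imageDigestMirrors[*]}{.source}{" "}{.mirrors}{"\n"}{end}`).Output()
	if err != nil {
		// No IDMS resources available (or the query failed); fall back to the provided default image.
		return defaultImage
	}
	for _, line := range strings.Split(out, "\n") {
		fields := strings.SplitN(strings.TrimSpace(line), " ", 2)
		if len(fields) != 2 {
			continue
		}
		source, mirrors := fields[0], fields[1]
		if strings.HasPrefix(releaseImage, source) && len(mirrors) > 0 {
			// Replace the source repository with the first configured mirror. The mirrors list is
			// parsed naively here; a real implementation should unmarshal the JSON array properly.
			firstMirror := strings.Trim(strings.Split(mirrors, ",")[0], `[]" `)
			return strings.Replace(releaseImage, source, firstMirror, 1)
		}
	}
	return defaultImage
}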
// DigestMirrorTest generic instructions for DigestImageMirrorSet tests
func DigestMirrorTest(oc *exutil.CLI, mcp *MachineConfigPool, idmsName, idmsMirrors, pinnedImage, pinnedImageSetName string) {
var (
allNodes = mcp.GetNodesOrFail()
waitForPinned = 10 * time.Minute
mcpsList = NewMachineConfigPoolList(oc.AsAdmin())
)
exutil.By("Remove the image from all nodes in the pool")
for _, node := range allNodes {
// We ignore errors, since the image can be present or not in the nodes
_ = NewRemoteImage(node, pinnedImage).Rmi()
}
logger.Infof("OK!\n")
exutil.By("Create new machine config to deploy a ImageDigestMirrorSet configuring a mirror registry")
idms := NewImageDigestMirrorSet(oc.AsAdmin(), idmsName, *NewMCOTemplate(oc, "add-image-digest-mirror-set.yaml"))
defer mcpsList.waitForComplete() // An ImageDigestMirrorSet resource impacts all the pools in the cluster
defer idms.Delete()
idms.Create("-p", "NAME="+idmsName, "IMAGEDIGESTMIRRORS="+idmsMirrors)
mcpsList.waitForComplete()
logger.Infof("OK!\n")
exutil.By("Pin the mirrored image")
pis, err := CreateGenericPinnedImageSet(oc.AsAdmin(), pinnedImageSetName, mcp.GetName(), []string{pinnedImage})
o.Expect(err).NotTo(o.HaveOccurred(), "Error creating pinnedimageset %s", pis)
defer pis.DeleteAndWait(waitForPinned)
logger.Infof("OK!\n")
exutil.By("Wait for all images to be pinned")
o.Expect(mcp.waitForPinComplete(waitForPinned)).To(o.Succeed(), "Pinned image operation is not completed in %s", mcp)
logger.Infof("OK!\n")
exutil.By("Check that the image is pinned")
for _, node := range allNodes {
ri := NewRemoteImage(node, pinnedImage)
logger.Infof("Checking %s", ri)
o.Expect(ri.IsPinned()).To(o.BeTrue(),
"%s is not pinned, but it should", ri)
}
logger.Infof("OK!\n")
}
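// Example of how DigestMirrorTest is driven from a test case (values taken from the existing 73653
// single-repository case): the mirrors JSON maps a fake source repository to the real busybox
// repository, and the pinned image uses the fake source so it can only be resolved through the mirror.
//
//	idmsName := "tc-73653-mirror-single-repository"
//	idmsMirrors := `[{"mirrors":["quay.io/openshifttest/busybox"], "source": "example-repo.io/digest-example/mybusy", "mirrorSourcePolicy":"NeverContactSource"}]`
//	pinnedImage := strings.Replace(BusyBoxImage, "quay.io/openshifttest/busybox", "example-repo.io/digest-example/mybusy", 1)
//	DigestMirrorTest(oc, mcp, idmsName, idmsMirrors, pinnedImage, "tc-73653-mirror-single-repository")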
|
package mco
| ||||
function
|
openshift/openshift-tests-private
|
1a78f3fc-e1e3-40d5-8b65-71306743e4e9
|
getReleaseInfoPullspecOrFail
|
['"fmt"', '"strings"']
|
github.com/openshift/openshift-tests-private/test/extended/mco/mco_pinnedimages.go
|
func getReleaseInfoPullspecOrFail(oc *exutil.CLI) []string {
mMcp := NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)
master := mMcp.GetNodesOrFail()[0]
remoteAdminKubeConfig := fmt.Sprintf("/root/remoteKubeConfig-%s", exutil.GetRandomString())
adminKubeConfig := exutil.KubeConfigPath()
defer master.RemoveFile(remoteAdminKubeConfig)
o.Expect(master.CopyFromLocal(adminKubeConfig, remoteAdminKubeConfig)).To(o.Succeed(),
"Error copying kubeconfig file to master node")
releaseInfoCommand := fmt.Sprintf("oc adm release info -o pullspec --registry-config /var/lib/kubelet/config.json --kubeconfig %s", remoteAdminKubeConfig)
stdout, _, err := master.DebugNodeWithChrootStd("sh", "-c", "set -a; source /etc/mco/proxy.env; "+releaseInfoCommand)
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting image release pull specs")
return strings.Split(stdout, "\n")
}
|
{'_': 'g.Describe("[sig-mco] MCO Pinnedimages", func() {\n\tdefer g.GinkgoRecover()\n\n\tvar (\n\t\toc = exutil.NewCLI("mco-pinnedimages", exutil.KubeConfigPath())\n\t\twMcp *MachineConfigPool\n\t\tmMcp *MachineConfigPool\n\t\t// Compact compatible MCP. If the node is compact/SNO this variable will be the master pool, else it will be the worker pool\n\t\tmcp *MachineConfigPool\n\t)\n\n\tg.JustBeforeEach(func() {\n\t\t// The pinnedimageset feature is currently only supported in techpreview\n\t\tskipIfNoTechPreview(oc.AsAdmin())\n\n\t\twMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolWorker)\n\t\tmMcp = NewMachineConfigPool(oc.AsAdmin(), MachineConfigPoolMaster)\n\t\tmcp = GetCompactCompatiblePool(oc.AsAdmin())\n\t\tlogger.Infof("%s %s %s", wMcp, mMcp, mcp)\n\n\t\tpreChecks(oc)\n\t})\n\n\tg.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-73659-[P1][OnCLayer] Pinned images when disk-pressure [Disruptive]", func() {\n\t\tvar (\n\t\t\twaitForPinned = time.Minute * 5\n\t\t\tpinnedImageSetName = "tc-73659-pin-images-disk-pressure"\n\t\t\tpinnedImageName = BusyBoxImage\n\t\t\tallNodes = mcp.GetNodesOrFail()\n\t\t\tnode = allNodes[0]\n\t\t\tcleanFileTimedService = generateTemplateAbsolutePath("tc-73659-clean-file-timed.service")\n\t\t\tcleanFileTimedServiceDestinationPath = "/etc/systemd/system/tc-73659-clean-file-timed.service"\n\t\t)\n\n\t\texutil.By("Get disk usage in node")\n\t\tdiskUsage, err := node.GetFileSystemSpaceUsage("/var/lib/containers/storage/")\n\t\to.Expect(err).NotTo(o.HaveOccurred(),\n\t\t\t"Cannot get the disk usage in node %s", node.GetName())\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Create a timed service that will restore the original disk usage after 5 minutes")\n\t\tlogger.Infof("Copy the service in the node")\n\t\tdefer node.DebugNodeWithChroot("rm", cleanFileTimedServiceDestinationPath)\n\t\to.Expect(node.CopyFromLocal(cleanFileTimedService, cleanFileTimedServiceDestinationPath)).\n\t\t\tNotTo(o.HaveOccurred(),\n\t\t\t\t"Error copying %s to %s in node %s", cleanFileTimedService, cleanFileTimedServiceDestinationPath, node.GetName())\n\t\t// We create transient timer that will execute the sercive, this service will restore the disk usage to its original usage\n\t\tlogger.Infof("Create a transient timer to execute the service after 5 mintues")\n\t\t// If an error happens, the transient timer will not be deleted unless we execute this command\n\t\tdefer node.DebugNodeWithChroot("systemctl", "reset-failed", "tc-73659-clean-file-timed.service")\n\t\tdefer node.DebugNodeWithChroot("systemctl", "stop", "tc-73659-clean-file-timed.service")\n\t\t_, err = node.DebugNodeWithChroot("systemd-run", `--on-active=5minutes`, `--unit=tc-73659-clean-file-timed.service`)\n\t\to.Expect(err).NotTo(o.HaveOccurred(), "Error creating the transient timer")\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Create a huge file so that the node reports disk pressure. Use about 90 per cent of the free space in the disk")\n\t\tfileSize := ((diskUsage.Avail + diskUsage.Used) * 9 / 10) - diskUsage.Used // calculate the file size to use a 90% of the disk space\n\t\to.Expect(fileSize).To(o.And(\n\t\t\to.BeNumerically("<", diskUsage.Avail),\n\t\t\to.BeNumerically(">", 0)),\n\t\t\t"Error not enough space on device to execute this test. 
Available: %d, Used %d", diskUsage.Avail, diskUsage.Used)\n\t\t_, err = node.DebugNodeWithChroot("fallocate", "-l", fmt.Sprintf("%d", fileSize), "/var/lib/containers/storage/tc-73659-huge-test-file.file")\n\t\to.Expect(err).NotTo(o.HaveOccurred(), "Error creating a file to trigger disk pressure")\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Wait for disk pressure to be reported")\n\t\t// It makes no sense to wait longer than the 5 minutes time out that we use to fix the disk usage.\n\t\t// If we need to increse this timeout, we need to increase the transiente timer too\n\t\to.Eventually(&node, "5m", "20s").Should(HaveConditionField("DiskPressure", "status", TrueString),\n\t\t\t"Node is not reporting DiskPressure, but it should.\\n%s", node.PrettyString())\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Pin images")\n\t\tpis, err := CreateGenericPinnedImageSet(oc.AsAdmin(), pinnedImageSetName, mcp.GetName(), []string{pinnedImageName})\n\t\to.Expect(err).NotTo(o.HaveOccurred(), "Error creating pinnedimageset %s", pis)\n\t\tdefer pis.DeleteAndWait(waitForPinned)\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Check the degraded status")\n\t\tlogger.Infof("Check that the node with disk pressure is reporting pinnedimagesetdegraded status")\n\t\tmcn := node.GetMachineConfigNode()\n\t\to.Eventually(mcn, "2m", "20s").Should(HaveConditionField("PinnedImageSetsDegraded", "status", TrueString),\n\t\t\t"MachineConfigNode was not degraded.\\n%s\\n%s", mcn.PrettyString(), node.PrettyString())\n\t\to.Eventually(mcn, "2m", "20s").Should(HaveConditionField("PinnedImageSetsDegraded", "reason", "PrefetchFailed"),\n\t\t\t"MachineConfigNode was not degraded with the expected reason.\\n%s\\n%s", mcn.PrettyString(), node.PrettyString())\n\t\to.Eventually(mcn, "2m", "20s").Should(HaveConditionField("PinnedImageSetsDegraded", "message", `node `+node.GetName()+` is reporting OutOfDisk=True`),\n\t\t\t"MachineConfigNode was not degraded with the expected message.\\n%s\\n%s", mcn.PrettyString(), node.PrettyString())\n\t\tlogger.Infof("Check that the rest of the nodes could pin the image and are not degraded")\n\t\tfor _, n := range allNodes {\n\t\t\tif n.GetName() != node.GetName() {\n\t\t\t\tlogger.Infof("Checking node %s", n.GetName())\n\t\t\t\to.Eventually(n.GetMachineConfigNode, "2m", "20s").Should(HaveConditionField("PinnedImageSetsDegraded", "status", FalseString),\n\t\t\t\t\t"MachineConfigNode was degraded.\\n%s\\n%s", node.GetMachineConfigNode().PrettyString(), node.PrettyString())\n\t\t\t\trmi := NewRemoteImage(n, pinnedImageName)\n\t\t\t\to.Eventually(rmi.IsPinned, "5m", "20s").Should(o.BeTrue(), "%s should be pinned but it is not", rmi)\n\t\t\t}\n\t\t}\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Wait for disk pressure to be fixed") // It should be fixed by the timed service that was created before\n\t\to.Eventually(&node, "20m", "20s").Should(HaveConditionField("DiskPressure", "status", FalseString),\n\t\t\t"Node is reporting DiskPressure, but it should not.\\n%s", node.PrettyString())\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Check that the degraded status was fixed")\n\t\to.Eventually(mcn, "2m", "20s").Should(HaveConditionField("PinnedImageSetsDegraded", "status", FalseString),\n\t\t\t"MachineConfigNode was not degraded.\\n%s\\n%s", mcn.PrettyString(), node.PrettyString())\n\t\to.Eventually(NewRemoteImage(node, pinnedImageName).IsPinned, "2m", "20s").Should(o.BeTrue(),\n\t\t\t"The degraded status was fixed, but the image was not 
pinned")\n\t\tlogger.Infof("OK!\\n")\n\t})\n\n\tg.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-High-73623-[P2][OnCLayer] Pin images [Disruptive]", func() {\n\t\tvar (\n\t\t\twaitForPinned = time.Minute * 15\n\t\t\tpinnedImageSetName = "tc-73623-pin-images"\n\t\t\tnode = mcp.GetNodesOrFail()[0]\n\t\t\tfirstPinnedImage = NewRemoteImage(node, BusyBoxImage)\n\t\t\tsecondPinnedImage = NewRemoteImage(node, AlpineImage)\n\t\t)\n\n\t\texutil.By("Remove images")\n\t\t_ = firstPinnedImage.Rmi()\n\t\t_ = secondPinnedImage.Rmi()\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Pin images")\n\t\tpis, err := CreateGenericPinnedImageSet(oc.AsAdmin(), pinnedImageSetName, mcp.GetName(), []string{firstPinnedImage.ImageName, secondPinnedImage.ImageName})\n\t\to.Expect(err).NotTo(o.HaveOccurred(), "Error creating pinnedimageset %s", pis)\n\t\tdefer pis.DeleteAndWait(waitForPinned)\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Wait for all images to be pinned")\n\t\to.Expect(mcp.waitForPinComplete(waitForPinned)).To(o.Succeed(), "Pinned image operation is not completed in %s", mcp)\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Check that the images are pinned")\n\t\to.Expect(firstPinnedImage.IsPinned()).To(o.BeTrue(), "%s is not pinned, but it should", firstPinnedImage)\n\t\to.Expect(secondPinnedImage.IsPinned()).To(o.BeTrue(), "%s is not pinned, but it should", secondPinnedImage)\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Patch the pinnedimageset and remove one image")\n\t\to.Expect(\n\t\t\tpis.Patch("json", fmt.Sprintf(`[{"op": "replace", "path": "/spec/pinnedImages", "value": [{"name": "%s"}]}]`, firstPinnedImage.ImageName)),\n\t\t).To(o.Succeed(),\n\t\t\t"Error patching %s to remove one image")\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Wait for the pinnedimageset changes to be applied")\n\t\to.Expect(mcp.waitForPinComplete(waitForPinned)).To(o.Succeed(), "Pinned image operation is not completed in %s", mcp)\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Check that only the image reamaining in the pinnedimageset is pinned")\n\t\to.Expect(firstPinnedImage.IsPinned()).To(o.BeTrue(), "%s is not pinned, but it should", firstPinnedImage)\n\t\to.Expect(secondPinnedImage.IsPinned()).To(o.BeFalse(), "%s is pinned, but it should NOT", secondPinnedImage)\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Remove the pinnedimageset")\n\t\to.Expect(pis.Delete()).To(o.Succeed(), "Error removing %s", pis)\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Wait for the pinnedimageset removal to be applied")\n\t\to.Expect(mcp.waitForPinComplete(waitForPinned)).To(o.Succeed(), "Pinned image operation is not completed in %s", mcp)\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Check that only the image reamaining in the pinnedimageset is pinned")\n\t\to.Expect(firstPinnedImage.IsPinned()).To(o.BeFalse(), "%s is pinned, but it should NOT", firstPinnedImage)\n\t\to.Expect(secondPinnedImage.IsPinned()).To(o.BeFalse(), "%s is pinned, but it should NOT", secondPinnedImage)\n\t\tlogger.Infof("OK!\\n")\n\t})\n\n\t// Disconnected clusters use an imagecontentsourcepolicy to mirror the images in openshittest. 
In this test cases we create an ImageDigestMirrorSet to mirror the same images and it is not supported\n\t// Hence we skip this test case in disconnected clusters\n\tg.It("Author:sregidor-ConnectedOnly-NonHyperShiftHOST-NonPreRelease-Longduration-High-73653-[OnCLayer] Pinned images with a ImageDigestMirrorSet mirroring a single repository [Disruptive]", func() {\n\t\tvar (\n\t\t\tidmsName = "tc-73653-mirror-single-repository"\n\t\t\tidmsMirrors = `[{"mirrors":["quay.io/openshifttest/busybox"], "source": "example-repo.io/digest-example/mybusy", "mirrorSourcePolicy":"NeverContactSource"}]`\n\t\t\t// actually quay.io/openshifttest/busybox@sha256:c5439d7db88ab5423999530349d327b04279ad3161d7596d2126dfb5b02bfd1f but using our configured mirror instead\n\t\t\tpinnedImage = strings.Replace(BusyBoxImage, "quay.io/openshifttest/busybox", "example-repo.io/digest-example/mybusy", 1)\n\t\t\tpinnedImageSetName = "tc-73653-mirror-single-repository"\n\t\t)\n\n\t\tDigestMirrorTest(oc, mcp, idmsName, idmsMirrors, pinnedImage, pinnedImageSetName)\n\t})\n\n\t// Disconnected clusters use an imagecontentsourcepolicy to mirror the images in openshittest. In this test cases we create an ImageDigestMirrorSet to mirror the same images and it is not supported\n\t// Hence we skip this test case in disconnected clusters\n\tg.It("Author:sregidor-ConnectedOnly-NonHyperShiftHOST-NonPreRelease-Longduration-High-73657-[P1][OnCLayer] Pinned images with a ImageDigestMirrorSet mirroring a domain [Disruptive]", func() {\n\t\tvar (\n\t\t\tidmsName = "tc-73657-mirror-domain"\n\t\t\tidmsMirrors = `[{"mirrors":["quay.io:443"], "source": "example-domain.io:443", "mirrorSourcePolicy":"NeverContactSource"}]`\n\t\t\t// actually quay.io/openshifttest/busybox@sha256:c5439d7db88ab5423999530349d327b04279ad3161d7596d2126dfb5b02bfd1f but using our configured mirror instead\n\t\t\tpinnedImage = strings.Replace(BusyBoxImage, "quay.io", "example-domain.io:443", 1)\n\t\t\tpinnedImageSetName = "tc-73657-mirror-domain"\n\t\t)\n\n\t\tDigestMirrorTest(oc, mcp, idmsName, idmsMirrors, pinnedImage, pinnedImageSetName)\n\t})\n\n\tg.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Medium-73361-[P2][OnCLayer] Pinnedimageset invalid pinned images [Disruptive]", func() {\n\t\tvar (\n\t\t\tinvalidPinnedImage = "quay.io/openshiftfake/fakeimage@sha256:0415f56ccc05526f2af5a7ae8654baec97d4a614f24736e8eef41a4591f08019"\n\t\t\tpinnedImageSetName = "tc-73361-invalid-pinned-image"\n\t\t\twaitForPinned = 10 * time.Minute\n\t\t)\n\n\t\texutil.By("Pin invalid image")\n\t\tpis, err := CreateGenericPinnedImageSet(oc.AsAdmin(), pinnedImageSetName, mcp.GetName(), []string{invalidPinnedImage})\n\t\to.Expect(err).NotTo(o.HaveOccurred(), "Error creating pinnedimageset %s", pis)\n\t\tdefer pis.DeleteAndWait(waitForPinned)\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Check that MCNs are PinnedImageSetDegraded")\n\t\tfor _, node := range mcp.GetNodesOrFail() {\n\t\t\tmcn := node.GetMachineConfigNode()\n\t\t\to.Eventually(mcn, "2m", "20s").Should(HaveConditionField("PinnedImageSetsDegraded", "status", TrueString))\n\t\t\to.Eventually(mcn, "2m", "20s").Should(HaveConditionField("PinnedImageSetsDegraded", "reason", "PrefetchFailed"))\n\t\t}\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Remove the pinnedimageset")\n\t\to.Expect(pis.Delete()).To(o.Succeed(), "Error removing %s", pis)\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Check that MCNs are not PinnedImageSetDegraded anymore")\n\t\tfor _, node := range mcp.GetNodesOrFail() {\n\t\t\tmcn := 
node.GetMachineConfigNode()\n\t\t\to.Eventually(mcn, "2m", "20s").Should(HaveConditionField("PinnedImageSetsDegraded", "status", FalseString))\n\t\t}\n\t\tlogger.Infof("OK!\\n")\n\t})\n\n\tg.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-73631-[OnCLayer] Pinned images garbage collection [Disruptive]", func() {\n\t\tvar (\n\t\t\twaitForPinned = time.Minute * 5\n\t\t\tpinnedImageSetName = "tc-73631-pinned-images-garbage-collector"\n\t\t\tgcKubeletConfig = `{"imageMinimumGCAge": "0s", "imageGCHighThresholdPercent": 2, "imageGCLowThresholdPercent": 1}`\n\t\t\tkcTemplate = generateTemplateAbsolutePath("generic-kubelet-config.yaml")\n\t\t\tkcName = "tc-73631-pinned-garbage-collector"\n\t\t\tnode = mcp.GetNodesOrFail()[0]\n\t\t\tstartTime = node.GetDateOrFail()\n\t\t\tpinnedImage = NewRemoteImage(node, BusyBoxImage)\n\t\t\tmanuallyPulledImage = NewRemoteImage(node, AlpineImage)\n\t\t)\n\n\t\texutil.By("Remove the test images")\n\t\t_ = pinnedImage.Rmi()\n\t\t_ = manuallyPulledImage.Rmi()\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Configure kubelet to start garbage collection")\n\t\tlogger.Infof("Create worker KubeletConfig")\n\t\tkc := NewKubeletConfig(oc.AsAdmin(), kcName, kcTemplate)\n\t\tdefer mcp.waitForComplete()\n\t\tdefer kc.Delete()\n\t\tkc.create("KUBELETCONFIG="+gcKubeletConfig, "POOL="+mcp.GetName())\n\n\t\texutil.By("Wait for configurations to be applied in worker pool")\n\t\tmcp.waitForComplete()\n\t\tlogger.Infof("OK!\\n")\n\n\t\tlogger.Infof("Pin image")\n\t\tpis, err := CreateGenericPinnedImageSet(oc.AsAdmin(), pinnedImageSetName, mcp.GetName(), []string{pinnedImage.ImageName})\n\t\to.Expect(err).NotTo(o.HaveOccurred(), "Error creating pinnedimageset %s", pis)\n\t\tdefer pis.DeleteAndWait(waitForPinned)\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Wait for all images to be pinned")\n\t\to.Expect(mcp.waitForPinComplete(waitForPinned)).To(o.Succeed(), "Pinned image operation is not completed in %s", mcp)\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Manually pull image")\n\t\to.Expect(manuallyPulledImage.Pull()).To(o.Succeed(),\n\t\t\t"Error pulling %s", manuallyPulledImage)\n\t\tlogger.Infof("Check that the manually pulled image is not pinned")\n\t\to.Expect(manuallyPulledImage.IsPinned()).To(o.BeFalse(),\n\t\t\t"Error, %s is pinned, but it should not", manuallyPulledImage)\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Check that the manually pulled image is garbage collected")\n\t\to.Eventually(manuallyPulledImage, "25m", "20s").ShouldNot(Exist(),\n\t\t\t"Error, %s has not been garbage collected", manuallyPulledImage)\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Check that the pinned image is still pinned after garbage collection")\n\t\to.Eventually(pinnedImage.IsPinned, "2m", "10s").Should(o.BeTrue(),\n\t\t\t"Error, after the garbage collection happened %s is not pinned anymore", pinnedImage)\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Reboot node")\n\t\to.Expect(node.Reboot()).To(o.Succeed(),\n\t\t\t"Error rebooting node %s", node.GetName())\n\t\to.Eventually(node.GetUptime, "15m", "30s").Should(o.BeTemporally(">", startTime),\n\t\t\t"%s was not properly rebooted", node)\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Check that the pinned image is still pinned after reboot")\n\t\to.Expect(pinnedImage.IsPinned()).To(o.BeTrue(),\n\t\t\t"Error, after the garbage collection happened %s is not pinned anymore", 
pinnedImage)\n\t\tlogger.Infof("OK!\\n")\n\t})\n\n\tg.It("Author:sregidor-NonHyperShiftHOST-NonPreRelease-Longduration-Medium-73635-[P1][OnCLayer] Pod can use pinned images while no access to the registry [Disruptive]", func() {\n\t\tvar (\n\t\t\twaitForPinned = time.Minute * 5\n\t\t\tpinnedImageSetName = "tc-73635-pinned-images-no-registry"\n\t\t\t// We pin the current release\'s tools image\n\t\t\t// if we cannot get the "tools" image it means we are in a disconnected cluster\n\t\t\t// and in disconnected clusters openshifttest images are mirrored and need the credentials for the mirror too\n\t\t\t// so if we cannot get the "tools" image we can use the "busybox" one.\n\t\t\tpinnedImage = getCurrentReleaseInfoImageSpecOrDefault(oc.AsAdmin(), "tools", BusyBoxImage)\n\t\t\tallNodes = mcp.GetNodesOrFail()\n\t\t\tpullSecret = GetPullSecret(oc.AsAdmin())\n\n\t\t\tdeploymentName = "tc-73635-test"\n\t\t\tdeploymentNamespace = oc.Namespace()\n\t\t\tdeployment = NewNamespacedResource(oc, "deployment", deploymentNamespace, deploymentName)\n\t\t\tscaledReplicas = 5\n\t\t\tnodeList = NewNamespacedResourceList(oc, "pod", deploymentNamespace)\n\t\t)\n\t\tdefer nodeList.PrintDebugCommand() // for debugging purpose in case of failed deployment\n\n\t\texutil.By("Remove the image from all nodes in the pool")\n\t\tfor _, node := range allNodes {\n\t\t\t// We ignore errors, since the image can be present or not in the nodes\n\t\t\t_ = NewRemoteImage(node, pinnedImage).Rmi()\n\t\t}\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Create pinnedimageset")\n\t\tpis, err := CreateGenericPinnedImageSet(oc.AsAdmin(), pinnedImageSetName, mcp.GetName(), []string{pinnedImage})\n\t\to.Expect(err).NotTo(o.HaveOccurred(), "Error creating pinnedimageset %s", pis)\n\t\tdefer pis.DeleteAndWait(waitForPinned)\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Wait for all images to be pinned")\n\t\to.Expect(mcp.waitForPinComplete(waitForPinned)).To(o.Succeed(), "Pinned image operation is not completed in %s", mcp)\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Check that the image was pinned in all nodes in the pool")\n\t\tfor _, node := range allNodes {\n\t\t\tri := NewRemoteImage(node, pinnedImage)\n\t\t\tlogger.Infof("Checking %s", ri)\n\t\t\to.Expect(ri.IsPinned()).To(o.BeTrue(),\n\t\t\t\t"%s is not pinned, but it should. %s")\n\t\t}\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Capture the current pull-secret value")\n\t\t// We don\'t use the pullSecret resource directly, instead we use auxiliary functions that will\n\t\t// extract and restore the secret\'s values using a file. Like that we can recover the value of the pull-secret\n\t\t// if our execution goes wrong, without printing it in the logs (for security reasons).\n\t\tsecretFile, err := getPullSecret(oc)\n\t\to.Expect(err).NotTo(o.HaveOccurred(), "Error getting the pull-secret")\n\t\tlogger.Debugf("Pull-secret content stored in file %s", secretFile)\n\t\tdefer func() {\n\t\t\tlogger.Infof("Restoring initial pull-secret value")\n\t\t\toutput, err := setDataForPullSecret(oc, secretFile)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Errorf("Error restoring the pull-secret\'s value. 
Error: %s\\nOutput: %s", err, output)\n\t\t\t}\n\t\t\twMcp.waitForComplete()\n\t\t\tmMcp.waitForComplete()\n\t\t}()\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Set an empty pull-secret")\n\t\to.Expect(pullSecret.SetDataValue(".dockerconfigjson", "{}")).To(o.Succeed(),\n\t\t\t"Error setting an empty pull-secret value")\n\t\tmcp.waitForComplete()\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Check that the image is pinned")\n\t\tfor _, node := range allNodes {\n\t\t\tlogger.Infof("Checking node %s", node.GetName())\n\t\t\tri := NewRemoteImage(node, pinnedImage)\n\t\t\to.Expect(ri.IsPinned()).To(o.BeTrue(),\n\t\t\t\t"%s is not pinned, but it should. %s")\n\t\t}\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Create test deployment")\n\t\tdefer deployment.Delete()\n\t\to.Expect(\n\t\t\tNewMCOTemplate(oc.AsAdmin(), "create-deployment.yaml").Create("-p", "NAME="+deploymentName, "IMAGE="+pinnedImage, "NAMESPACE="+deploymentNamespace),\n\t\t).To(o.Succeed(),\n\t\t\t"Error creating the deployment")\n\t\to.Eventually(deployment, "6m", "15s").Should(BeAvailable(),\n\t\t\t"Resource is NOT available:\\n/%s", deployment.PrettyString())\n\t\to.Eventually(deployment.Get, "6m", "15s").WithArguments(`{.status.readyReplicas}`).Should(o.Equal(deployment.GetOrFail(`{.spec.replicas}`)),\n\t\t\t"Resource is NOT stable, still creating replicas:\\n/%s", deployment.PrettyString())\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Scale app")\n\t\to.Expect(\n\t\t\tdeployment.Patch("merge", fmt.Sprintf(`{"spec":{"replicas":%d}}`, scaledReplicas)),\n\t\t).To(o.Succeed(),\n\t\t\t"Error scaling %s", deployment)\n\t\to.Eventually(deployment, "6m", "15s").Should(BeAvailable(),\n\t\t\t"Resource is NOT available:\\n/%s", deployment.PrettyString())\n\t\to.Eventually(deployment.Get, "6m", "15s").WithArguments(`{.status.readyReplicas}`).Should(o.Equal(deployment.GetOrFail(`{.spec.replicas}`)),\n\t\t\t"Resource is NOT stable, still creating replicas:\\n/%s", deployment.PrettyString())\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Reboot nodes")\n\t\tfor _, node := range allNodes {\n\t\t\to.Expect(node.Reboot()).To(o.Succeed(), "Error rebooting node %s", node)\n\t\t}\n\t\tfor _, node := range allNodes {\n\t\t\t_, err := node.DebugNodeWithChroot("hostname")\n\t\t\to.Expect(err).NotTo(o.HaveOccurred(), "Node %s was not recovered after rebot", node)\n\t\t}\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Check that the applicaion is OK after the reboot")\n\t\to.Eventually(deployment, "6m", "15s").Should(BeAvailable(),\n\t\t\t"Resource is NOT available:\\n/%s", deployment.PrettyString())\n\t\to.Eventually(deployment.Get, "6m", "15s").WithArguments(`{.status.readyReplicas}`).Should(o.Equal(deployment.GetOrFail(`{.spec.replicas}`)),\n\t\t\t"Resource is NOT stable, still creating replicas:\\n/%s", deployment.PrettyString())\n\t\tlogger.Infof("OK!\\n")\n\t})\n\n\tg.It("Author:sregidor-NonHyperShiftHOST-ConnectedOnly-NonPreRelease-Longduration-Medium-73630-[P2][OnCLayer] Pin release images [Disruptive]", func() {\n\t\tvar (\n\t\t\twaitForPinned = time.Minute * 30\n\t\t\tpinnedImageSetName = "tc-73630-pinned-imageset-release"\n\t\t\tpinnedImages = RemoveDuplicates(getReleaseInfoPullspecOrFail(oc.AsAdmin()))\n\t\t\tnode = mcp.GetNodesOrFail()[0]\n\t\t\tminGigasAvailableInNodes = 40\n\t\t)\n\n\t\tskipIfDiskSpaceLessThanBytes(node, "/var/lib/containers/storage/", int64(float64(minGigasAvailableInNodes)*(math.Pow(1024, 3))))\n\n\t\texutil.By("Create pinnedimageset to pin all pullSpec images")\n\t\tpis, err := 
CreateGenericPinnedImageSet(oc.AsAdmin(), pinnedImageSetName, mcp.GetName(), pinnedImages)\n\t\to.Expect(err).NotTo(o.HaveOccurred(), "Error creating pinnedimageset %s", pis)\n\t\tdefer pis.DeleteAndWait(waitForPinned)\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Wait for all images to be pinned")\n\t\to.Expect(mcp.waitForPinComplete(waitForPinned)).To(o.Succeed(), "Pinned image operation is not completed in %s", mcp)\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Check that all images were pinned")\n\t\tfor _, image := range pinnedImages {\n\t\t\tri := NewRemoteImage(node, image)\n\t\t\to.Expect(ri.IsPinned()).To(o.BeTrue(),\n\t\t\t\t"%s is not pinned, but it should. %s")\n\t\t}\n\t\tlogger.Infof("OK!\\n")\n\n\t})\n\n\tg.It("Author:sserafin-NonHyperShiftHOST-NonPreRelease-Longduration-High-73648-[OnCLayer] A rebooted node reconciles with the pinned images status [Disruptive]", func() {\n\t\tvar (\n\t\t\twaitForPinned = time.Minute * 5\n\t\t\tpinnedImageSetName = "tc-73648-pinned-image"\n\t\t\tpinnedImage = BusyBoxImage\n\t\t\tallMasters = mMcp.GetNodesOrFail()\n\t\t\tpullSecret = GetPullSecret(oc.AsAdmin())\n\t\t)\n\n\t\texutil.By("Create pinnedimageset")\n\t\tpis, err := CreateGenericPinnedImageSet(oc.AsAdmin(), pinnedImageSetName, mMcp.GetName(), []string{pinnedImage})\n\t\to.Expect(err).NotTo(o.HaveOccurred(), "Error creating pinnedimageset %s", pis)\n\t\tdefer pis.DeleteAndWait(waitForPinned)\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Wait for all images to be pinned")\n\t\to.Expect(mMcp.waitForPinComplete(waitForPinned)).To(o.Succeed(), "Pinned image operation is not completed in %s", mMcp)\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Check that the image was pinned in all nodes in the pool")\n\t\tfor _, node := range allMasters {\n\t\t\tri := NewRemoteImage(node, pinnedImage)\n\t\t\tlogger.Infof("Checking %s", ri)\n\t\t\to.Expect(ri.IsPinned()).To(o.BeTrue(),\n\t\t\t\t"%s is not pinned, but it should", ri)\n\t\t}\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Capture the current pull-secret value")\n\t\tsecretFile, err := getPullSecret(oc)\n\t\to.Expect(err).NotTo(o.HaveOccurred(), "Error getting the pull-secret")\n\t\tlogger.Debugf("Pull-secret content stored in file %s", secretFile)\n\t\tdefer func() {\n\t\t\tlogger.Infof("Restoring initial pull-secret value")\n\t\t\toutput, err := setDataForPullSecret(oc, secretFile)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Errorf("Error restoring the pull-secret\'s value. 
Error: %v\\nOutput: %s", err, output)\n\t\t\t}\n\t\t\twMcp.waitForComplete()\n\t\t\tmMcp.waitForComplete()\n\t\t}()\n\t\tlogger.Infof("OK!\\n")\n\n\t\texutil.By("Set an empty pull-secret")\n\t\to.Expect(pullSecret.SetDataValue(".dockerconfigjson", "{}")).To(o.Succeed(),\n\t\t\t"Error setting an empty pull-secret value")\n\t\tmMcp.waitForComplete()\n\t\twMcp.waitForComplete()\n\t\tlogger.Infof("OK!\\n")\n\n\t\t// find the node with the machine-config-controller\n\t\texutil.By("Get the mcc node")\n\t\tvar mcc = NewController(oc.AsAdmin())\n\t\tmccMaster, err := mcc.GetNode()\n\t\to.Expect(err).NotTo(o.HaveOccurred(), "Cannot get the node where the MCO controller is running")\n\t\tlogger.Infof("OK!\\n")\n\n\t\t// reboot the node with mcc\n\t\texutil.By("Reboot node")\n\t\tstartTime := mccMaster.GetDateOrFail()\n\t\to.Expect(mccMaster.Reboot()).To(o.Succeed(), "Error rebooting node %s", mccMaster)\n\t\tlogger.Infof("OK!\\n")\n\n\t\t// delete the pinnedImageSet\n\t\texutil.By("Delete the pinnedimageset")\n\t\to.Eventually(pis.Delete, "13m", "20s").ShouldNot(o.HaveOccurred(), "Error deleting pinnedimageset %s", pis)\n\t\tlogger.Infof("OK!\\n")\n\n\t\t// wait for the rebooted node\n\t\texutil.By("Wait for the rebooted node")\n\t\to.Eventually(mccMaster.GetUptime, "15m", "30s").Should(o.BeTemporally(">", startTime),\n\t\t\t"%s was not properly rebooted", mccMaster)\n\t\tmMcp.waitForComplete()\n\t\to.Expect(mMcp.waitForPinComplete(waitForPinned)).To(o.Succeed(), "Pinned image operation is not completed in %s", mMcp)\n\t\tlogger.Infof("OK!\\n")\n\n\t\t// check pinned imageset is deleted in all nodes in the pool\n\t\texutil.By("Check that the images are not pinned in all nodes in the pool")\n\t\tfor _, node := range allMasters {\n\t\t\tri := NewRemoteImage(node, pinnedImage)\n\t\t\tlogger.Infof("Checking %s", ri)\n\t\t\to.Eventually(ri.IsPinned, "5m", "20s").Should(o.BeFalse(),\n\t\t\t\t"%s is pinned, but it should not", ri)\n\t\t}\n\t\tlogger.Infof("OK!\\n")\n\t})\n})'}
|
mco
|