element_type
stringclasses 4
values | project_name
stringclasses 1
value | uuid
stringlengths 36
36
| name
stringlengths 0
346
| imports
stringlengths 0
2.67k
| structs
stringclasses 761
values | interfaces
stringclasses 22
values | file_location
stringclasses 545
values | code
stringlengths 26
8.07M
| global_vars
stringclasses 7
values | package
stringclasses 124
values | tags
stringclasses 1
value |
---|---|---|---|---|---|---|---|---|---|---|---|
test case | openshift/openshift-tests-private | e3fd5b9f-b896-4d1f-a515-2f2fa0ea45d0 | Longduration-Author:tbuskey-High-53583-upgrade osc operator by changing subscription [Disruptive][Serial] | ['"fmt"', '"path/filepath"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Longduration-Author:tbuskey-High-53583-upgrade osc operator by changing subscription [Disruptive][Serial]", func() {
g.Skip("Upgrade tests should be manually done")
var (
subscriptionUpgrade = subscription
kataconfigUpgrade = kataconfig
testrunUpgradeWithSubscription = testrun
testrunConfigmapName = "osc-config-upgrade-subscription"
msg string
msgIfErr string
)
testrunUpgradeWithSubscription.checked = false
upgradeConfigMapExists, err := getTestRunParameters(oc, &subscriptionUpgrade, &kataconfigUpgrade, &testrunUpgradeWithSubscription, testrunConfigmapNs, testrunConfigmapName)
if err != nil {
e2e.Failf("ERROR: testrunUpgradeWithSubscription configmap %v errors: %v\n%v", testrunUpgradeWithSubscription, err)
}
if !upgradeConfigMapExists {
msg = fmt.Sprintf("SKIP: %v configmap does not exist. Cannot upgrade by changing subscription", testrunConfigmapName)
g.Skip(msg)
}
if testrunUpgradeWithSubscription.redirectNeeded {
if ocpMajorVer == "4" && minorVer <= 12 {
redirectType = "ImageContentSourcePolicy"
redirectFile = filepath.Join(testDataDir, "ImageContentSourcePolicy-brew.yaml")
}
err = applyImageRedirect(oc, redirectFile, redirectType, redirectName)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("%v", err))
}
if testrunUpgradeWithSubscription.catalogSourceName != subscription.catalogSourceName {
waitForCatalogReadyOrFail(oc, testrunUpgradeWithSubscription.catalogSourceName)
g.By("Check catalog for " + subscriptionUpgrade.subName)
label := fmt.Sprintf("catalog=%v", testrunUpgradeWithSubscription.catalogSourceName)
errCheck := wait.Poll(10*time.Second, 240*time.Second, func() (bool, error) {
msg, err = oc.AsAdmin().Run("get").Args("packagemanifest", "-l", label, "-n", subscriptionUpgrade.catalogSourceNamespace).Output()
if strings.Contains(msg, subscriptionUpgrade.subName) {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("%v is not in the %v catalog. Cannot change subscription: %v %v", subscriptionUpgrade.subName, testrunUpgradeWithSubscription.catalogSourceName, msg, err))
msg, err = changeSubscriptionCatalog(oc, subscriptionUpgrade, testrunUpgradeWithSubscription)
msgIfErr = fmt.Sprintf("ERROR: patching the subscription catalog %v failed %v %v", subscriptionUpgrade, msg, err)
o.Expect(err).NotTo(o.HaveOccurred(), msgIfErr)
o.Expect(msg).NotTo(o.BeEmpty(), msgIfErr)
msg, err = subscriptionIsFinished(oc, subscriptionUpgrade)
msgIfErr = fmt.Sprintf("ERROR: subscription wait for catalog patch %v failed %v %v", subscriptionUpgrade, msg, err)
o.Expect(err).NotTo(o.HaveOccurred(), msgIfErr)
o.Expect(msg).NotTo(o.BeEmpty(), msgIfErr)
}
if testrunUpgradeWithSubscription.channel != subscription.channel {
g.By("Changing the subscription channel")
msg, err = changeSubscriptionChannel(oc, subscriptionUpgrade, testrunUpgradeWithSubscription)
msgIfErr = fmt.Sprintf("ERROR: patching the subscription channel %v: %v %v", subscriptionUpgrade, msg, err)
o.Expect(err).NotTo(o.HaveOccurred(), msgIfErr)
o.Expect(msg).NotTo(o.BeEmpty(), msgIfErr)
// all pods restart & subscription gets recreated
msg, err = subscriptionIsFinished(oc, subscriptionUpgrade)
msgIfErr = fmt.Sprintf("ERROR: subscription wait after channel changed %v: %v %v", subscriptionUpgrade, msg, err)
o.Expect(err).NotTo(o.HaveOccurred(), msgIfErr)
o.Expect(msg).NotTo(o.BeEmpty(), msgIfErr)
}
}) | |||||
test case | openshift/openshift-tests-private | c5a15459-7253-4903-b5f9-ed794d991bb9 | Author:vvoronko-High-60231-Scale-up deployment [Serial] | ['"fmt"', '"strconv"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Author:vvoronko-High-60231-Scale-up deployment [Serial]", func() {
oc.SetupProject()
var (
podNs = oc.Namespace()
deployName = "dep-60231-" + getRandomString()
initReplicas = 3
maxReplicas = 6
numOfVMs int
msg string
)
kataNodes := exutil.GetNodeListByLabel(oc, kataocLabel)
o.Expect(len(kataNodes) > 0).To(o.BeTrue(), fmt.Sprintf("kata nodes list is empty %v", kataNodes))
if !kataconfig.enablePeerPods {
g.By("Verify no instaces exists before the test")
numOfVMs = getTotalInstancesOnNodes(oc, opNamespace, kataNodes)
//TO DO wait for some time to enable disposal of previous test instances
o.Expect(numOfVMs).To(o.Equal(0), fmt.Sprintf("initial number of VM instances should be zero"))
}
g.By("Create deployment config from template")
configFile, err := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", defaultDeployment,
"-p", "NAME="+deployName, "-p", "REPLICAS="+strconv.Itoa(initReplicas),
"-p", "RUNTIMECLASSNAME="+kataconfig.runtimeClassName, "-p", "IMAGE="+testrun.workloadImage).OutputToFile(getRandomString() + "dep-common.json")
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Could not create deployment configFile %v", configFile))
g.By("Applying deployment file " + configFile)
msg, err = oc.AsAdmin().Run("apply").Args("-f", configFile, "-n", podNs).Output()
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Could not apply configFile %v", msg))
g.By("Wait for deployment to be ready")
defer oc.AsAdmin().Run("delete").Args("deploy", "-n", podNs, deployName, "--ignore-not-found").Execute()
msg, err = waitForDeployment(oc, podNs, deployName)
o.Expect(err).NotTo(o.HaveOccurred())
errReplicasMsg := fmt.Sprintf("Deployment %v number of ready replicas don't match requested", deployName)
o.Expect(msg).To(o.Equal(strconv.Itoa(initReplicas)), errReplicasMsg)
if !kataconfig.enablePeerPods {
g.By("Verifying actual number of VM instances")
numOfVMs = getTotalInstancesOnNodes(oc, opNamespace, kataNodes)
o.Expect(numOfVMs).To(o.Equal(initReplicas), fmt.Sprintf("actual number of VM instances doesn't match"))
}
g.By(fmt.Sprintf("Scaling deployment from %v to %v", initReplicas, maxReplicas))
err = oc.AsAdmin().Run("scale").Args("deployment", deployName, "--replicas="+strconv.Itoa(maxReplicas), "-n", podNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Could not Scale deployment %v", msg))
msg, err = waitForDeployment(oc, podNs, deployName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(msg).To(o.Equal(strconv.Itoa(maxReplicas)), errReplicasMsg)
if !kataconfig.enablePeerPods {
numOfVMs = getTotalInstancesOnNodes(oc, opNamespace, kataNodes)
o.Expect(numOfVMs).To(o.Equal(maxReplicas), fmt.Sprintf("actual number of VM instances doesn't match"))
}
g.By("SUCCESSS - deployment scale-up finished successfully")
}) | |||||
test case | openshift/openshift-tests-private | 10096bf8-270b-42a9-a435-252086becc2b | Author:vvoronko-High-60233-Scale-down deployment [Serial] | ['"fmt"', '"strconv"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Author:vvoronko-High-60233-Scale-down deployment [Serial]", func() {
oc.SetupProject()
var (
podNs = oc.Namespace()
deployName = "dep-60233-" + getRandomString()
initReplicas = 6
updReplicas = 3
numOfVMs int
msg string
)
kataNodes := exutil.GetNodeListByLabel(oc, kataocLabel)
o.Expect(len(kataNodes) > 0).To(o.BeTrue(), fmt.Sprintf("kata nodes list is empty %v", kataNodes))
if !kataconfig.enablePeerPods {
g.By("Verify no instaces exists before the test")
numOfVMs = getTotalInstancesOnNodes(oc, opNamespace, kataNodes)
//TO DO wait for some time to enable disposal of previous test instances
o.Expect(numOfVMs).To(o.Equal(0), fmt.Sprintf("initial number of VM instances should be zero"))
}
g.By("Create deployment config from template")
configFile, err := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", defaultDeployment,
"-p", "NAME="+deployName, "-p", "REPLICAS="+strconv.Itoa(initReplicas), "-p", "RUNTIMECLASSNAME="+kataconfig.runtimeClassName,
"-p", "IMAGE="+testrun.workloadImage).OutputToFile(getRandomString() + "dep-common.json")
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Could not create deployment configFile %v", configFile))
g.By("Applying deployment file " + configFile)
msg, err = oc.AsAdmin().Run("apply").Args("-f", configFile, "-n", podNs).Output()
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Could not apply configFile %v", msg))
g.By("Wait for deployment to be ready")
defer oc.AsAdmin().Run("delete").Args("deploy", "-n", podNs, deployName, "--ignore-not-found").Execute()
msg, err = waitForDeployment(oc, podNs, deployName)
o.Expect(err).NotTo(o.HaveOccurred())
errReplicasMsg := fmt.Sprintf("Deployment %v number of ready replicas don't match requested", deployName)
o.Expect(msg).To(o.Equal(strconv.Itoa(initReplicas)), errReplicasMsg)
if !kataconfig.enablePeerPods {
g.By("Verifying actual number of VM instances")
numOfVMs = getTotalInstancesOnNodes(oc, opNamespace, kataNodes)
o.Expect(numOfVMs).To(o.Equal(initReplicas), fmt.Sprintf("actual number of VM instances doesn't match"))
}
g.By(fmt.Sprintf("Scaling deployment from %v to %v", initReplicas, updReplicas))
err = oc.AsAdmin().Run("scale").Args("deployment", deployName, "--replicas="+strconv.Itoa(updReplicas), "-n", podNs).Execute()
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Could not Scale deployment %v", msg))
msg, err = waitForDeployment(oc, podNs, deployName)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(msg).To(o.Equal(strconv.Itoa(updReplicas)), errReplicasMsg)
if !kataconfig.enablePeerPods {
numOfVMs = getTotalInstancesOnNodes(oc, opNamespace, kataNodes)
o.Expect(numOfVMs).To(o.Equal(updReplicas), fmt.Sprintf("actual number of VM instances doesn't match"))
}
g.By("SUCCESSS - deployment scale-down finished successfully")
}) | |||||
test case | openshift/openshift-tests-private | 0dee2e37-9711-4f01-a663-c7dde1f871af | Author:vvoronko-High-64043-expose-serice deployment | ['"fmt"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Author:vvoronko-High-64043-expose-serice deployment", func() {
oc.SetupProject()
var (
podNs = oc.Namespace()
deployName = "dep-64043-" + getRandomString()
msg string
statusCode = 200
testPageBody = "Hello OpenShift!"
ocpHelloImage = "quay.io/openshifttest/hello-openshift:1.2.0" // should this be testrun.workloadImage?
)
g.By("Create deployment config from template")
configFile, err := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", defaultDeployment,
"-p", "NAME="+deployName, "-p", "IMAGE="+ocpHelloImage,
"-p", "RUNTIMECLASSNAME="+kataconfig.runtimeClassName).OutputToFile(getRandomString() + "dep-common.json")
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Could not create configFile %v", configFile))
g.By("Applying deployment file " + configFile)
msg, err = oc.AsAdmin().Run("apply").Args("-f", configFile, "-n", podNs).Output()
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Could not apply configFile %v", msg))
g.By("Wait for deployment to be ready")
defer oc.AsAdmin().Run("delete").Args("deploy", "-n", podNs, deployName, "--ignore-not-found").Execute()
msg, err = waitForDeployment(oc, podNs, deployName)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Deployment %v didn't reached expected state: %v", deployName, msg))
g.By("Expose deployment and its service")
defer deleteRouteAndService(oc, deployName, podNs)
host, err := createServiceAndRoute(oc, deployName, podNs)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("route host=%v", host)
g.By("send request via the route")
strURL := "http://" + host
resp, err := getHttpResponse(strURL, statusCode)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("send request via the route %v failed with: %v", strURL, err))
o.Expect(resp).To(o.ContainSubstring(testPageBody), fmt.Sprintf("Response doesn't match"))
g.By("SUCCESSS - deployment Expose service finished successfully")
}) | |||||
test case | openshift/openshift-tests-private | 092e57f0-38e0-4d8d-b5e9-5982926e670f | Author:vvoronko-High-63121-Peerpods-cluster-limit [Serial] | ['"fmt"', '"strconv"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Author:vvoronko-High-63121-Peerpods-cluster-limit [Serial]", func() {
//TODO edge case: check no podvms are up in the air somehow others test will fail
if !kataconfig.enablePeerPods {
g.Skip("63121 podvm limit test is only for peer pods")
}
oc.SetupProject()
var (
podNs = oc.Namespace()
deployName = "dep-63121-" + getRandomString()
podIntLimit = 2
defaultLimit = "10"
kataNodesAmount = len(exutil.GetNodeListByLabel(oc, kataocLabel))
msg string
cleanupRequired = true
)
defer func() {
if cleanupRequired {
e2e.Logf("Cleanup required, restoring to default %v", defaultLimit)
patchPeerPodLimit(oc, opNamespace, defaultLimit)
}
}()
patchPeerPodLimit(oc, opNamespace, strconv.Itoa(podIntLimit))
g.By("Create deployment config from template")
initReplicas := strconv.Itoa(podIntLimit * kataNodesAmount)
configFile, err := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", defaultDeployment,
"-p", "NAME="+deployName, "-p", "REPLICAS="+initReplicas,
"-p", "RUNTIMECLASSNAME="+kataconfig.runtimeClassName, "-p", "IMAGE="+testrun.workloadImage).OutputToFile(getRandomString() + "dep-common.json")
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Could not create deployment configFile %v", configFile))
g.By("Applying deployment file " + configFile)
msg, err = oc.AsAdmin().Run("apply").Args("-f", configFile, "-n", podNs).Output()
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Could not apply configFile %v", msg))
defer deleteKataResource(oc, "deploy", podNs, deployName)
g.By("Wait for deployment to be ready")
msg, err = waitForDeployment(oc, podNs, deployName)
e2e.Logf("Deployment has initially %v pods", msg)
o.Expect(err).NotTo(o.HaveOccurred())
errReplicasMsg := fmt.Sprintf("Deployment %v number of ready replicas don't match requested", deployName)
o.Expect(msg).To(o.Equal(initReplicas), errReplicasMsg)
extraReplicas := strconv.Itoa((podIntLimit + 1) * kataNodesAmount)
g.By(fmt.Sprintf("Scaling deployment from %v to %v", initReplicas, extraReplicas))
msg, err = oc.AsAdmin().Run("scale").Args("deployment", deployName, "--replicas="+extraReplicas, "-n", podNs).Output()
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Could not Scale deployment %v", msg))
extraPods := strconv.Itoa(kataNodesAmount)
g.By("Wait for 30sec to check deployment has " + extraPods + " pending pods w/o corresponding podvm, because of the limit")
errCheck := wait.Poll(30*time.Second, snooze*time.Second, func() (bool, error) {
msg, err = oc.AsAdmin().Run("get").Args("deploy", "-n", podNs, deployName, "-o=jsonpath={.status.unavailableReplicas}").Output()
if msg == extraPods {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Timed out waiting for %v additional pending pods %v %v", extraPods, msg, err))
msg, err = oc.AsAdmin().Run("get").Args("deploy", "-n", podNs, deployName, "-o=jsonpath={.status.readyReplicas}").Output()
o.Expect(msg).To(o.Equal(initReplicas), errReplicasMsg)
g.By("restore podvm limit")
patchPeerPodLimit(oc, opNamespace, defaultLimit)
cleanupRequired = false
msg, err = waitForDeployment(oc, podNs, deployName)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Deployment has %v running pods after patching the limit", msg)
o.Expect(msg).To(o.Equal(extraReplicas), errReplicasMsg)
g.By("SUCCESSS - deployment peer pods podvm limit - finished successfully")
}) | |||||
test case | openshift/openshift-tests-private | 7c3935c8-c432-4a66-97f6-d79f6d27dd62 | Author:vvoronko-High-57339-Eligibility | ['"fmt"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Author:vvoronko-High-57339-Eligibility", func() {
if !kataconfig.eligibility {
g.Skip("57339-Eligibility test is only for eligibility=true in kataconfig")
}
oc.SetupProject()
kataNodes := exutil.GetNodeListByLabel(oc, kataocLabel)
o.Expect(len(kataNodes) > 0).To(o.BeTrue(), fmt.Sprintf("kata nodes list is empty %v", kataNodes))
eligibleNodes := exutil.GetNodeListByLabel(oc, featureLabel)
o.Expect(len(eligibleNodes) == len(kataNodes)).To(o.BeTrue(), fmt.Sprintf("kata nodes list length is differ from eligible ones"))
for _, node := range kataNodes {
found, _ := exutil.StringsSliceContains(eligibleNodes, node)
o.Expect(found).To(o.BeTrue(), fmt.Sprintf("node %v is not in the list of eligible nodes %v", node, eligibleNodes))
}
}) | |||||
test case | openshift/openshift-tests-private | 4cf31d7a-6156-4ab9-8353-63e41de7bc89 | Author:vvoronko-High-67650-pod-with-filesystem | ['"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Author:vvoronko-High-67650-pod-with-filesystem", func() {
oc.SetupProject()
var (
podNs = oc.Namespace()
pvcName = "pvc-67650-" + getRandomString()
capacity = "2"
)
err := createRWOfilePVC(oc, podNs, pvcName, capacity)
defer oc.WithoutNamespace().AsAdmin().Run("delete").Args("pvc", pvcName, "-n", podNs, "--ignore-not-found").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
//some platforms provision automatically while others wait got the 1st customer with "Pending" status
//_, err = checkResourceJsonpath(oc, "pvc", pvcName, podNs, "-o=jsonpath={.status.phase}", "Bound", 30*time.Second, 5*time.Second)
//TODO: add a function that takes any pod and know to inject storage part to it)
// run pod with kata
//TODO: test IO
}) | |||||
test case | openshift/openshift-tests-private | 181688b9-83e5-422f-aadc-fbd65b908c9e | Author:tbuskey-High-66554-Check and verify control plane pods and other components | ['"time"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Author:tbuskey-High-66554-Check and verify control plane pods and other components", func() {
var (
duration time.Duration = 300
interval time.Duration = 10
)
testControlPod := func(resType, resName, desiredCountJsonPath, actualCountJsonPath, podLabel string) {
// Check the resource Type for desired count by looking at the jsonpath
// Check the actual count at this jsonpath
// Wait until the actual count == desired count then set expectedPods to the actual count
// Verify count of "Running" pods with podLabel matches expectedPods
expectedPods, msg, err := checkResourceJsonpathMatch(oc, resType, resName, subscription.namespace, desiredCountJsonPath, actualCountJsonPath)
if err != nil || msg == "" {
e2e.Logf("%v does not match %v in %v %v %v %v", desiredCountJsonPath, actualCountJsonPath, resName, resType, msg, err)
}
o.Expect(err).NotTo(o.HaveOccurred())
msg, err = checkLabeledPodsExpectedRunning(oc, subscription.namespace, podLabel, expectedPods)
if err != nil || msg == "" {
e2e.Logf("Could not find pods labeled %v %v %v", podLabel, msg, err)
}
o.Expect(err).NotTo(o.HaveOccurred())
}
testControlPod("deployment", "controller-manager", "-o=jsonpath={.spec.replicas}", "-o=jsonpath={.status.readyReplicas}", "control-plane=controller-manager")
testControlPod("daemonset", "openshift-sandboxed-containers-monitor", "-o=jsonpath={.status.desiredNumberScheduled}", "-o=jsonpath={.status.numberReady}", "name=openshift-sandboxed-containers-monitor")
if kataconfig.enablePeerPods {
testControlPod("deployment", "peer-pods-webhook", "-o=jsonpath={.spec.replicas}", "-o=jsonpath={.status.readyReplicas}", "app=peer-pods-webhook")
testControlPod("daemonset", "peerpodconfig-ctrl-caa-daemon", "-o=jsonpath={.status.desiredNumberScheduled}", "-o=jsonpath={.status.numberReady}", "name=peerpodconfig-ctrl-caa-daemon")
// Check for the peer pod RuntimeClass
msg, err := checkResourceExists(oc, "RuntimeClass", ppRuntimeClass, subscription.namespace, duration, interval)
if err != nil || msg == "" {
e2e.Logf("Could not find %v in RuntimeClass %v %v", ppRuntimeClass, msg, err)
}
// and kata RuntimeClass
msg, err = checkResourceExists(oc, "RuntimeClass", "kata", subscription.namespace, duration, interval)
if err != nil || msg == "" {
e2e.Logf("Could not find kata in RuntimeClass %v %v", msg, err)
}
}
}) | |||||
test case | openshift/openshift-tests-private | a3dafa11-0e1f-4ec2-8f2e-5f9c7d5a5b35 | Author:tbuskey-High-68945-Check FIPS on pods | ['"fmt"', '"strings"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Author:tbuskey-High-68945-Check FIPS on pods", func() {
if !clusterHasEnabledFIPS(oc, subscription.namespace) {
g.Skip("The cluster does not have FIPS enabled")
}
oc.SetupProject()
podNamespace := oc.Namespace()
podName := createKataPod(oc, podNamespace, defaultPod, "pod68945", kataconfig.runtimeClassName, testrun.workloadImage)
defer deleteKataResource(oc, "pod", podNamespace, podName)
msg, err := checkResourceJsonpath(oc, "pod", podName, podNamespace, "-o=jsonpath={.status.phase}", podRunState, podSnooze*time.Second, 10*time.Second)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("ERROR: pod %v could not be created: %v %v", podName, msg, err))
msgIfErr := "ERROR: The cluster is in FIPS but pods are not"
// check that the pod(vm) booted with fips
podCmdline, podCmdlineErr := oc.AsAdmin().Run("rsh").Args("-T", "-n", podNamespace, podName, "cat", "/proc/cmdline").Output()
if podCmdlineErr != nil || !strings.Contains(podCmdline, "fips=1") {
msgIfErr = fmt.Sprintf("%v\nERROR: %v did not boot with fips enabled:%v %v", msgIfErr, podName, podCmdline, podCmdlineErr)
}
// check that pod(vm) has fips enabled
podFipsEnabled, podFipsEnabledErr := oc.AsAdmin().Run("rsh").Args("-T", "-n", podNamespace, podName, "cat", "/proc/sys/crypto/fips_enabled").Output()
if podFipsEnabledErr != nil || podFipsEnabled != "1" {
msgIfErr = fmt.Sprintf("%v\nERROR: %v does not have fips_enabled: %v %v", msgIfErr, podName, podFipsEnabled, podFipsEnabledErr)
}
// fail with all possible debugging logs included
o.Expect(podCmdlineErr).NotTo(o.HaveOccurred(), msgIfErr)
o.Expect(podCmdline).To(o.ContainSubstring("fips=1"), msgIfErr)
o.Expect(podFipsEnabledErr).NotTo(o.HaveOccurred(), msgIfErr)
o.Expect(podFipsEnabled).To(o.Equal("1"), msgIfErr)
}) | |||||
test case | openshift/openshift-tests-private | 181b85b0-78b4-4494-9a49-eff2be74d502 | Author:vvoronko-High-68930-deploy peerpod with type annotation | ['"fmt"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Author:vvoronko-High-68930-deploy peerpod with type annotation", func() {
if testrun.workloadToTest == "coco" {
g.Skip("Test not supported with coco")
}
oc.SetupProject()
var (
basePodName = "-example-68930"
podNs = oc.Namespace()
annotations = map[string]string{
"MEMORY": "256",
"CPU": "0",
"INSTANCESIZE": "",
}
instanceSize = map[string]string{
"aws": "t3.xlarge",
"azure": "Standard_D4as_v5",
}
)
provider := getCloudProvider(oc)
val, ok := instanceSize[provider]
if !(kataconfig.enablePeerPods && ok) {
g.Skip("68930-deploy peerpod with type annotation supported only for kata-remote on AWS and AZURE")
}
annotations["INSTANCESIZE"] = val
g.By("Deploying pod with kata runtime and verify it")
podName, err := createKataPodAnnotated(oc, podNs, podAnnotatedTemplate, basePodName, kataconfig.runtimeClassName, testrun.workloadImage, annotations)
defer deleteKataResource(oc, "pod", podNs, podName)
o.Expect(err).NotTo(o.HaveOccurred())
actualSize, err := getPeerPodMetadataInstanceType(oc, podNs, podName, provider)
e2e.Logf("Podvm with required instance type %v was launched as %v", instanceSize[provider], actualSize)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Failed rsh to pod %v to provide metadata: %v", podName, err))
o.Expect(actualSize).To(o.Equal(instanceSize[provider]), fmt.Sprintf("Instance size don't match provided annotations: %v", err))
g.By("SUCCESS - Podvm with required instance type was launched")
}) | |||||
test case | openshift/openshift-tests-private | e0037cdf-94fe-404b-803a-48c3ecce669e | Author:vvoronko-High-69018-deploy peerpod with default vcpu and memory | ['"fmt"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Author:vvoronko-High-69018-deploy peerpod with default vcpu and memory", func() {
if testrun.workloadToTest == "coco" {
g.Skip("Test not supported with coco")
}
oc.SetupProject()
var (
basePodName = "-example-69018"
podNs = oc.Namespace()
annotations = map[string]string{
"MEMORY": "6000",
"CPU": "2",
"INSTANCESIZE": "",
}
instanceSize = map[string]string{
"aws": "t3.large",
"azure": "Standard_D2as_v5",
}
)
provider := getCloudProvider(oc)
val, ok := instanceSize[provider]
if !(kataconfig.enablePeerPods && ok) {
g.Skip("69018-deploy peerpod with type annotation not supported on " + provider)
}
annotations["INSTANCESIZE"] = val
g.By("Deploying pod with kata runtime and verify it")
podName, err := createKataPodAnnotated(oc, podNs, podAnnotatedTemplate, basePodName, kataconfig.runtimeClassName, testrun.workloadImage, annotations)
defer deleteKataResource(oc, "pod", podNs, podName)
o.Expect(err).NotTo(o.HaveOccurred())
actualSize, err := getPeerPodMetadataInstanceType(oc, podNs, podName, provider)
e2e.Logf("Podvm with required instance type %v was launched as %v", instanceSize[provider], actualSize)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Failed rsh to pod %v to provide metadata: %v", podName, err))
o.Expect(actualSize).To(o.Equal(instanceSize[provider]), fmt.Sprintf("Instance size don't match provided annotations: %v", err))
g.By("SUCCESS - Podvm with required instance type was launched")
}) | |||||
test case | openshift/openshift-tests-private | 81f04a03-4e09-47dd-8d92-780c37472d89 | Author:vvoronko-High-69589-deploy kata with cpu and memory annotation | ['"fmt"', '"golang.org/x/exp/slices"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Author:vvoronko-High-69589-deploy kata with cpu and memory annotation", func() {
oc.SetupProject()
var (
basePodName = "-example-69589"
podNs = oc.Namespace()
annotations = map[string]string{
"MEMORY": "1234",
"CPU": "2",
"INSTANCESIZE": "",
}
supportedProviders = []string{"azure", "gcp", "none"}
memoryOptions = fmt.Sprintf("-m %vM", annotations["MEMORY"])
)
provider := getCloudProvider(oc)
if kataconfig.enablePeerPods || !slices.Contains(supportedProviders, provider) {
g.Skip("69589-deploy kata with type annotation supported only for kata runtime on platforms with nested virtualization enabled")
}
g.By("Deploying pod with kata runtime and verify it")
podName, err := createKataPodAnnotated(oc, podNs, podAnnotatedTemplate, basePodName, kataconfig.runtimeClassName, testrun.workloadImage, annotations)
defer deleteKataResource(oc, "pod", podNs, podName)
o.Expect(err).NotTo(o.HaveOccurred())
//get annotations from the live pod
podAnnotations, _ := oc.Run("get").Args("pods", podName, "-o=jsonpath={.metadata.annotations}", "-n", podNs).Output()
podCmd := []string{"-n", oc.Namespace(), podName, "--", "nproc"}
//check CPU available from the kata pod itself by nproc command:
actualCPU, err := oc.WithoutNamespace().AsAdmin().Run("exec").Args(podCmd...).Output()
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("'oc exec %v' Failed", podCmd))
strErr := fmt.Sprintf("Actual CPU count for the pod %v isn't matching expected %v full annotations:\n%v", actualCPU, annotations["CPU"], podAnnotations)
o.Expect(actualCPU).To(o.Equal(annotations["CPU"]), strErr)
//check MEMORY from the node running kata VM:
nodeName, _ := exutil.GetPodNodeName(oc, podNs, podName)
cmd := "ps -ef | grep uuid | grep -v grep"
vmFlags, err := exutil.DebugNodeWithOptionsAndChroot(oc, nodeName, []string{"-q"}, "bin/sh", "-c", cmd)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Failed debug node to get qemu instance options"))
strErr = fmt.Sprintf("VM flags for the pod doesn't contain expected %v full annotations:\n%v", memoryOptions, podAnnotations)
o.Expect(vmFlags).To(o.ContainSubstring(memoryOptions), strErr)
g.By("SUCCESS - KATA pod with required VM instance size was launched")
}) | |||||
test case | openshift/openshift-tests-private | b95d8ebd-5eb9-48a2-a7d7-a05bcaf69a20 | Author:abhbaner-High-66123-podvm Image ID check peer pods | ['"fmt"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Author:abhbaner-High-66123-podvm Image ID check peer pods", func() {
var (
msg string
err error
imageID string
)
if !kataconfig.enablePeerPods {
g.Skip("OCP-66123 is only for peerpods")
}
oc.SetupProject()
cloudPlatform := getCloudProvider(oc)
// check if IMAGE ID exists in peer-pod-cm
msg, err, imageID = CheckPodVMImageID(oc, ppConfigMapName, cloudPlatform, opNamespace)
if imageID == "" {
e2e.Logf("IMAGE ID: %v", imageID)
msgIfErr := fmt.Sprintf("ERROR: IMAGE ID could not be retrieved from the peer-pods-cm even after kataconfig install: %v %v %v", imageID, msg, err)
o.Expect(imageID).NotTo(o.BeEmpty(), msgIfErr)
o.Expect(err).NotTo(o.HaveOccurred(), msgIfErr)
}
e2e.Logf("The Image ID present in the peer-pods-cm is: %v , msg: %v", imageID, msg)
g.By("SUCCESS - IMAGE ID check complete")
}) | |||||
test case | openshift/openshift-tests-private | 9b77a87e-6004-4fd1-8447-4f843c1b5979 | Author:tbuskey-Medium-70824-Catalog upgrade osc operator [Disruptive] | ['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Author:tbuskey-Medium-70824-Catalog upgrade osc operator [Disruptive]", func() {
// Test 70824: upgrade the OSC operator by swapping the catalog source image.
// Opt-in: requires the osc-config-upgrade-catalog configmap; otherwise skips.
// NOTE(review): the unconditional g.Skip below means this scenario only runs manually.
g.Skip("Upgrade tests should be manually done")
// imageAfter/imageBefore are populated from the configmap by getUpgradeCatalogConfigMap.
upgradeCatalog := UpgradeCatalogDescription{
name: "osc-config-upgrade-catalog",
namespace: "default",
exists: false,
imageAfter: "",
imageBefore: "",
catalogName: subscription.catalogSourceName,
}
err := getUpgradeCatalogConfigMap(oc, &upgradeCatalog)
// Skip (rather than fail) when the configmap is absent: the upgrade test is opt-in.
if !upgradeCatalog.exists {
skipMessage := fmt.Sprintf("%v configmap for Catalog upgrade does not exist", upgradeCatalog.name)
g.Skip(skipMessage)
}
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("ERROR: could not get %v configmap in ns %v %v", upgradeCatalog.name, upgradeCatalog.namespace, err))
// what is the current CSV name?
csvNameBefore, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", subscription.subName, "-n", subscription.namespace, "-o=jsonpath={.status.currentCSV}").Output()
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("ERROR: could not get the CSV name of sub %v %v %v", subscription.subName, csvNameBefore, err))
o.Expect(csvNameBefore).NotTo(o.BeEmpty(), fmt.Sprintf("ERROR: the csv name is empty for sub %v", subscription.subName))
// what is the controller-manager pod name?
listOfPodsBefore, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", subscription.namespace, "-l", "control-plane=controller-manager", "-o=jsonpath={.items..metadata.name}").Output()
err = changeCatalogImage(oc, upgradeCatalog.catalogName, upgradeCatalog.imageAfter)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("ERROR: could not change catalog %v image to %v Error %v", upgradeCatalog.catalogName, upgradeCatalog.imageAfter, err))
e2e.Logf("Waiting for pods (%v) to get replaced", listOfPodsBefore)
waitForPodsToTerminate(oc, subscription.namespace, listOfPodsBefore)
// subscription .status.installedCsv stays "AtLatestKnown" and will not change, so it does not show the subscription is done
// wait until the currentCSV in the sub changes & get the new CSV name
csvNameAfter, _ := checkResourceJsonPathChanged(oc, "sub", subscription.subName, subscription.namespace, "-o=jsonpath={.status.currentCSV}", csvNameBefore, 300*time.Second, 10*time.Second)
e2e.Logf("Watch CSV %v to show Succeed", csvNameAfter)
_, _ = checkResourceJsonpath(oc, "csv", csvNameAfter, subscription.namespace, "-o=jsonpath={.status.phase}", "Succeeded", 300*time.Second, 10*time.Second)
})
test case | openshift/openshift-tests-private | a6c4cdd7-8360-4aeb-936b-4c8a531a90e8 | Author:vvoronko-High-C00210-run [peerpodGPU] cuda-vectoradd | ['"fmt"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Author:vvoronko-High-C00210-run [peerpodGPU] cuda-vectoradd", func() {
// Test C00210: run the CUDA vector-add sample in a peer pod on a GPU instance
// type and assert the pod reaches Succeeded and logs "Test PASSED".
oc.SetupProject()
var (
basePodName = "-example-00210"
cudaImage = "nvidia/samples:vectoradd-cuda11.2.1"
podNs = oc.Namespace()
// GPU-capable instance types per cloud; only aws/azure are supported here.
instanceSize = map[string]string{
"aws": "g5.2xlarge",
"azure": "Standard_NC8as_T4_v3",
}
phase = "Succeeded"
logPassed = "Test PASSED"
)
if !(kataconfig.enablePeerPods && testrun.enableGPU) {
g.Skip("210-run peerpod with GPU cuda-vectoradd supported only with GPU enabled in podvm")
}
instance := instanceSize[getCloudProvider(oc)]
g.By("Deploying pod with kata runtime and verify it")
newPodName := getRandomString() + basePodName
configFile, err := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", podAnnotatedTemplate,
"-p", "NAME="+newPodName,
"-p", "RUNTIMECLASSNAME="+kataconfig.runtimeClassName,
"-p", "INSTANCESIZE="+instance,
"-p", "IMAGE="+cudaImage).OutputToFile(getRandomString() + "Pod-common.json")
o.Expect(err).NotTo(o.HaveOccurred())
podName, err := createKataPodFromTemplate(oc, podNs, newPodName, configFile, kataconfig.runtimeClassName, phase)
defer deleteKataResource(oc, "pod", podNs, podName)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("vectoradd-cuda on peer pod with GPU instance type %v reached %v phase", instance, phase)
//verify the log of the pod
log, err := exutil.GetSpecificPodLogs(oc, podNs, "", podName, "")
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("unable to get the pod (%s) logs", podName))
o.Expect(log).To(o.ContainSubstring(logPassed), "required lines are missing in log")
g.By("SUCCESS - Podvm with GPU instance type was launched successfully")
})
test case | openshift/openshift-tests-private | 2d5c1d5e-a39c-4137-9d1e-372ca98aa628 | Author:Anjana-High-43221-Verify PodVM image creation job completion | ['"fmt"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Author:Anjana-High-43221-Verify PodVM image creation job completion", func() {
// Test 43221: libvirt-only peer-pods check that the podvm image creation job
// completed and reported a successful image upload.
if getCloudProvider(oc) != "libvirt" {
g.Skip("43221 PodVM image creation job is specific to libvirt")
}
if !kataconfig.enablePeerPods {
g.Skip("43221 PodVM image creation job is only for peer pods")
}
g.By("Checking the status of the PodVM image creation job")
msg, err := verifyImageCreationJobSuccess(oc, opNamespace, ppParam, ppParamsLibvirtConfigMapName, cloudPlatform)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("%v", err))
// The job log must contain this exact success line.
expMsg := "Uploaded the image successfully"
o.Expect(strings.Contains(msg, expMsg)).To(o.BeTrue(), fmt.Sprintf("Expected message: %v not found in the job output.", expMsg))
g.By("SUCCESS - PodVM image creation job completed successfully")
})
test case | openshift/openshift-tests-private | 2832f029-26f9-4b89-8f36-302eabb88463 | Author:Anjana-High-422081-Verify SE-enabled pod deployment | ['"fmt"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Author:Anjana-High-422081-Verify SE-enabled pod deployment", func() {
// Test 422081: libvirt-only check that a kata pod comes up with IBM Secure
// Execution (SE) enabled, verified via checkSEEnabled.
if getCloudProvider(oc) != "libvirt" {
g.Skip("422081 SE-enabled pod deployment is specific to libvirt")
}
oc.SetupProject()
var (
msg string
err error
defaultPodName = "-se-check"
podNs = oc.Namespace()
)
g.By("Deploying pod to verify SE enablement")
newPodName := createKataPod(oc, podNs, defaultPod, defaultPodName, kataconfig.runtimeClassName, testrun.workloadImage)
// Clean up the pod even if the SE check below fails.
defer func() {
deleteKataResource(oc, "pod", podNs, newPodName)
g.By("Deleted SE-enabled pod")
}()
msg, err = checkResourceJsonpath(oc, "pod", newPodName, podNs, "-o=jsonpath={.status.phase}", podRunState, podSnooze*time.Second, 10*time.Second)
if err != nil {
e2e.Logf("ERROR: pod %v could not be installed: %v %v", newPodName, msg, err)
o.Expect(err).NotTo(o.HaveOccurred())
}
g.By("SUCCESS - Pod installed for SE verification")
g.By("Checking if pod is SE-enabled")
err = checkSEEnabled(oc, newPodName, podNs)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("%v", err))
})
test case | openshift/openshift-tests-private | 5c7f075a-2af9-4bdb-9e2a-783fa40d1777 | Author:tbuskey-High-C00316-run and verify cosigned pod | ['"fmt"', '"strings"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Author:tbuskey-High-C00316-run and verify cosigned pod", func() {
// Test C00316 (coco only): apply a cosigned pod manifest and poll the
// namespace events until the pod's last event is "Started", failing on a
// "Failed" event or after loopMax seconds.
if testrun.workloadToTest != "coco" {
g.Skip("Run and verify cosigned pod is only for workloadToTest = 'coco'")
}
oc.SetupProject()
var (
podName = "ocp-cc-pod"
testNamespace = oc.Namespace()
podLastEventReason string
// loopCount advances by countIncrement seconds per iteration, so it doubles
// as both the loop guard and the elapsed-time estimate against loopMax.
loopCount int
loopMax = 450
countIncrement = 15
sleepTime = time.Duration(countIncrement) * time.Second
outputFromOc string
)
defer deleteResource(oc, "pod", podName, testNamespace, 90*time.Second, 10*time.Second)
msg, err := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", trusteeCosignedPodFile, "-n", testNamespace).Output()
if err != nil {
e2e.Logf("Error: applying cosigned pod file %v failed: %v %v", trusteeCosignedPodFile, msg, err)
}
for !strings.Contains(podLastEventReason, "Started") && loopCount < loopMax {
loopCount = loopCount + countIncrement
// The last reason in the namespace event list is taken as the pod's latest state.
outputFromOc, err = oc.AsAdmin().WithoutNamespace().Run("events").Args("-o=jsonpath={.items..reason}", "-n", testNamespace).Output()
splitString := strings.Split(outputFromOc, " ")
podLastEventReason = splitString[len(splitString)-1]
e2e.Logf("%v pod event reason: %v", podName, podLastEventReason)
if strings.Contains(outputFromOc, "Failed") || loopCount >= loopMax {
err = fmt.Errorf("pod %v failed err: %v timeout: %v of %v\n\n", podName, err, loopCount, loopMax)
}
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("ERROR: %v", err))
time.Sleep(sleepTime)
}
})
test case | openshift/openshift-tests-private | c482916f-9293-4f17-aad0-89fa69c650ae | Author:vvoronko-High-C00317-delete operator with running workload [Serial] | ['"fmt"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Author:vvoronko-High-C00317-delete operator with running workload [Serial]", func() {
// Test C00317: delete the operator (csv + sub) while a kata workload is
// running, then verify the control-plane pods and the existing workload keep
// running and that a new kata pod can still be launched.
oc.SetupProject()
var (
msg string
err error
defaultPodName = "-example-00317"
podNs = oc.Namespace()
)
g.By("Deploying pod with kata runtime and verify it")
fstPodName := createKataPod(oc, podNs, defaultPod, defaultPodName, kataconfig.runtimeClassName, testrun.workloadImage)
defer deleteKataResource(oc, "pod", podNs, fstPodName)
msg, err = checkResourceJsonpath(oc, "pod", fstPodName, podNs, "-o=jsonpath={.status.phase}", podRunState, podSnooze*time.Second, 10*time.Second)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("ERROR: pod %v could not be installed: %v %v", fstPodName, msg, err))
g.By("delete csv and sub")
msg, err = deleteOperator(oc, subscription)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("msg:%v err:%v", msg, err))
g.By("verify control plane pods are running")
// Peer-pods adds two extra control-plane workloads to verify.
if kataconfig.enablePeerPods {
msg, err = testControlPod(oc, subscription.namespace, "daemonset", "peerpodconfig-ctrl-caa-daemon",
"-o=jsonpath={.status.desiredNumberScheduled}", "-o=jsonpath={.status.numberReady}", "name=peerpodconfig-ctrl-caa-daemon")
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("msg:%v err:%v", msg, err))
msg, err = testControlPod(oc, subscription.namespace, "deployment", "peer-pods-webhook",
"-o=jsonpath={.spec.replicas}", "-o=jsonpath={.status.readyReplicas}", "app=peer-pods-webhook")
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("msg:%v err:%v", msg, err))
}
msg, err = testControlPod(oc, subscription.namespace, "daemonset", "openshift-sandboxed-containers-monitor",
"-o=jsonpath={.status.desiredNumberScheduled}", "-o=jsonpath={.status.numberReady}", "name=openshift-sandboxed-containers-monitor")
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("msg:%v err:%v", msg, err))
g.By("monitor the 1st pod is still running")
msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", fstPodName, "-n", podNs, "-o=jsonpath={.status.phase}").Output()
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("ERROR: pod %v is not in expected state: %v, actual is: %v %v", fstPodName, podRunState, msg, err))
//launch another pod
secPodName := createKataPod(oc, podNs, defaultPod, defaultPodName, kataconfig.runtimeClassName, testrun.workloadImage)
defer deleteKataResource(oc, "pod", podNs, secPodName)
msg, err = checkResourceJsonpath(oc, "pod", secPodName, podNs, "-o=jsonpath={.status.phase}", podRunState, podSnooze*time.Second, 10*time.Second)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("ERROR: pod %v could not be installed: %v %v", secPodName, msg, err))
g.By("SUCCESS - operator deleted while workload keep running")
})
test case | openshift/openshift-tests-private | d4ed4715-eae9-40de-8bfe-f3d133a5da7f | Author:vvoronko-High-C00999-deploy peerpod with tags | ['"fmt"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata.go | g.It("Author:vvoronko-High-C00999-deploy peerpod with tags", func() {
// Test C00999 (peer-pods on Azure only): deploy a peer pod and verify the
// instance metadata exposes the tags configured in the peer-pods configmap.
if !(testrun.workloadToTest == "peer-pods" && getCloudProvider(oc) == "azure") {
g.Skip("Test supported only with peer-pods on Azure since AWS tags disabled for metadata by default")
}
oc.SetupProject()
var (
basePodName = "-example-00999"
podNs = oc.Namespace()
//works with default configmap value
tagValue = map[string]string{
"aws": "value1",
"azure": "key1:value1;key2:value2", //format is different than in configmap
}
)
provider := getCloudProvider(oc)
g.By("Deploying pod with kata runtime and verify it")
newPodName := createKataPod(oc, podNs, defaultPod, basePodName, kataconfig.runtimeClassName, testrun.workloadImage)
defer deleteKataResource(oc, "pod", podNs, newPodName)
msg, err := checkResourceJsonpath(oc, "pod", newPodName, podNs, "-o=jsonpath={.status.phase}", podRunState, podSnooze*time.Second, 10*time.Second)
if err != nil {
e2e.Logf("ERROR: pod %v could not be installed: %v %v", newPodName, msg, err)
o.Expect(err).NotTo(o.HaveOccurred())
}
// Query the cloud metadata service from inside the pod and compare tags.
actualValue, err := getPeerPodMetadataTags(oc, podNs, newPodName, provider)
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Failed rsh to pod %v to provide metadata: %v", newPodName, err))
e2e.Logf("%v pod tags: %v", newPodName, actualValue)
o.Expect(actualValue).To(o.ContainSubstring(tagValue[provider]), fmt.Sprintf("Instance size don't match provided annotations: %v", err))
g.By("SUCCESS - Podvm with required instance type was launched")
})
file | openshift/openshift-tests-private | 477c51d0-abce-4ed9-9370-d3881f455f0f | kata_util | import (
"encoding/base64"
"encoding/json"
"fmt"
"io"
"math/rand"
"net/http"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/tidwall/gjson"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | // Package kata operator tests
package kata
import (
"encoding/base64"
"encoding/json"
"fmt"
"io"
"math/rand"
"net/http"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/tidwall/gjson"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
// SubscriptionDescription holds the values used to render and create an OLM
// Subscription for the operator under test.
// NOTE(review): the json tags are on unexported fields, which encoding/json
// ignores; they serve only as documentation of the manifest field names.
type SubscriptionDescription struct {
subName string `json:"name"`
namespace string `json:"namespace"`
channel string `json:"channel"`
ipApproval string `json:"installPlanApproval"`
operatorPackage string `json:"spec.name"`
catalogSourceName string `json:"source"`
catalogSourceNamespace string `json:"sourceNamespace"`
template string
}
// KataconfigDescription holds the values used to render and create a
// KataConfig custom resource (same json-tag caveat as above).
type KataconfigDescription struct {
name string `json:"name"`
logLevel string `json:"logLevel"`
eligibility bool `json:"checkNodeEligibility"`
runtimeClassName string `json:"runtimeClassName"`
enablePeerPods bool `json:"enablePeerPods"`
template string
}
// TestRunDescription carries per-run test configuration, typically loaded
// from an osc-config* configmap; `checked` records whether it was validated.
type TestRunDescription struct {
checked bool
catalogSourceName string
channel string
redirectNeeded bool
mustgatherImage string
operatorVer string
eligibility bool
labelSingleNode bool
eligibleSingleNode bool
runtimeClassName string
enablePeerPods bool
enableGPU bool
podvmImageUrl string
workloadImage string
installKataRPM bool
workloadToTest string
trusteeCatalogSourcename string
trusteeUrl string
}
// PeerpodParam mirrors the keys of the peer-pods configmap/secret for each
// supported cloud (AWS, Azure, libvirt).
// If you changes this please make changes to func createPeerPodSecrets
type PeerpodParam struct {
AWS_SUBNET_ID string
AWS_VPC_ID string
PODVM_INSTANCE_TYPE string
PROXY_TIMEOUT string
VXLAN_PORT string
AWS_REGION string
AWS_SG_IDS string
PODVM_AMI_ID string
CLOUD_PROVIDER string
AZURE_REGION string
AZURE_RESOURCE_GROUP string
AZURE_IMAGE_ID string
AZURE_INSTANCE_SIZE string
AZURE_NSG_ID string
AZURE_SUBNET_ID string
LIBVIRT_KVM_HOST_ADDRESS string
LIBVIRT_PODVM_DISTRO string
LIBVIRT_CAA_SRC string
LIBVIRT_CAA_REF string
LIBVIRT_DOWNLOAD_SOURCES string
LIBVIRT_CONFIDENTIAL_COMPUTE_ENABLED string
LIBVIRT_UPDATE_PEERPODS_CM string
LIBVIRT_ORG_ID string
LIBVIRT_BASE_OS_VERSION string
LIBVIRT_IMAGE_NAME string
LIBVIRT_PODVM_TAG string
LIBVIRT_SE_BOOT string
LIBVIRT_PODVM_IMAGE_URI string
}
// UpgradeCatalogDescription describes the configmap-driven catalog-image
// upgrade scenario (before/after images for a named CatalogSource).
type UpgradeCatalogDescription struct {
name string
namespace string
exists bool
imageAfter string
imageBefore string
catalogName string
}
// Package-level timeouts and labels shared by the kata tests.
// NOTE(review): the *Snooze durations are bare counts; call sites multiply by
// time.Second (e.g. podSnooze*time.Second). "resSnoose" is a typo but is kept
// because it may be referenced elsewhere in the package.
var (
snooze time.Duration = 2400
kataSnooze time.Duration = 7200 // Installing/deleting kataconfig reboots nodes. AWS BM takes 20 minutes/node
podSnooze time.Duration = 600 // Peer Pods take longer than 2 minutes
resSnoose time.Duration = 300 // to delete csv or sub 5min should be enough
podRunState = "Running"
featureLabel = "feature.node.kubernetes.io/runtime.kata=true"
workerLabel = "node-role.kubernetes.io/worker"
kataocLabel = "node-role.kubernetes.io/kata-oc"
customLabel = "custom-label=test"
kataconfigStatusQuery = "-o=jsonpath={.status.conditions[?(@.type=='InProgress')].status}"
allowedWorkloadTypes = [3]string{"kata", "peer-pods", "coco"}
)
// ensureNamespaceIsInstalled makes sure the given namespace exists, creating
// it from namespaceTemplateFile when "oc get ns" reports it missing.
// It returns nil when the namespace already exists, was just created, or was
// created concurrently by another test ("AlreadyExists"/"unchanged").
//
// Fix vs. previous version: the processed-file error used to be captured with
// ":=" inside the if-block, shadowing the named return; the trailing
// "return err" then returned the stale error from the initial "oc get" even
// after a successful apply. The dead strings.Contains(namespaceFile,
// "already exists") check on a file path is also removed.
func ensureNamespaceIsInstalled(oc *exutil.CLI, namespace, namespaceTemplateFile string) (err error) {
	msg, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ns", namespace, "--no-headers").Output()
	if err == nil && !strings.Contains(msg, "Error from server (NotFound)") {
		return nil // namespace is already present
	}
	// Render the namespace manifest from the template into a temp file.
	namespaceFile, err := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", namespaceTemplateFile,
		"-p", "NAME="+namespace).OutputToFile(getRandomString() + "namespaceFile.json")
	if err != nil || namespaceFile == "" {
		// Only fatal when the processed file really was not written.
		if _, statErr := os.Stat(namespaceFile); statErr != nil {
			return fmt.Errorf("ERROR creating the namespace (%v) yaml %s, %v", namespace, namespaceFile, statErr)
		}
	}
	msg, err = oc.AsAdmin().Run("apply").Args("-f", namespaceFile).Output()
	// Any of these messages means the namespace now exists.
	if strings.Contains(msg, "AlreadyExists") || strings.Contains(msg, "unchanged") || strings.Contains(msg, "created") {
		return nil
	}
	if err != nil {
		return fmt.Errorf("applying namespace file (%v) issue: %v %v", namespaceFile, msg, err)
	}
	return err
}
// ensureOperatorGroupIsInstalled makes sure an OperatorGroup exists in the
// namespace, creating one from templateFile when "oc get operatorgroup"
// reports none. Returns nil when one already exists or was just created.
//
// Fix vs. previous version: the processed-file check used the inverted
// condition `operatorgroupFile != ""` (the sibling namespace helper tests
// `== ""`), and the inner ":=" shadowed the named return so the trailing
// "return err" returned the stale "oc get" error after a successful apply.
func ensureOperatorGroupIsInstalled(oc *exutil.CLI, namespace, templateFile string) (err error) {
	msg, err := oc.AsAdmin().Run("get").Args("operatorgroup", "-n", namespace, "--no-headers").Output()
	if err == nil && !strings.Contains(msg, "No resources found in") {
		return nil // an operatorgroup already exists in the namespace
	}
	// Render the operatorgroup manifest from the template into a temp file.
	operatorgroupFile, err := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", templateFile,
		"-p", "NAME="+namespace, "NAMESPACE="+namespace).OutputToFile(getRandomString() + "operatorgroupFile.json")
	if err != nil || operatorgroupFile == "" {
		// Only fatal when the processed file really was not written.
		if _, statErr := os.Stat(operatorgroupFile); statErr != nil {
			return fmt.Errorf("ERROR creating the operatorgroup (%v) yaml %v, %v", namespace, operatorgroupFile, statErr)
		}
	}
	msg, err = oc.AsAdmin().Run("apply").Args("-f", operatorgroupFile, "-n", namespace).Output()
	// Any of these messages means the operatorgroup now exists.
	if strings.Contains(msg, "AlreadyExists") || strings.Contains(msg, "unchanged") || strings.Contains(msg, "created") {
		return nil
	}
	if err != nil {
		return fmt.Errorf("applying operatorgroup file (%v) issue %v %v", operatorgroupFile, msg, err)
	}
	return err
}
// ensureOperatorIsSubscribed creates the operator Subscription from
// sub.template when "oc get sub" shows it missing, then blocks in
// subscriptionIsFinished until the install (sub + csv + controller pod)
// completes; its error is the function's final error.
// NOTE(review): the subTemplate parameter is unused — the template comes from
// sub.template. Confirm before removing it (callers pass it).
func ensureOperatorIsSubscribed(oc *exutil.CLI, sub SubscriptionDescription, subTemplate string) (err error) {
msg, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", sub.subName, "-n", sub.namespace, "--no-headers").Output()
if err != nil || strings.Contains(msg, "Error from server (NotFound):") {
// Render the Subscription manifest from the template into a temp file.
// NOTE(review): "err :=" here shadows the named return; the shadowed error
// is handled inside this block, so nothing is lost, but it is fragile.
subFile, err := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", sub.template, "-p", "SUBNAME="+sub.subName, "SUBNAMESPACE="+sub.namespace, "CHANNEL="+sub.channel,
"APPROVAL="+sub.ipApproval, "OPERATORNAME="+sub.operatorPackage, "SOURCENAME="+sub.catalogSourceName, "SOURCENAMESPACE="+sub.catalogSourceNamespace, "-n", sub.namespace).OutputToFile(getRandomString() + "subscriptionFile.json")
// NOTE(review): `subFile != ""` looks inverted relative to the sibling
// ensure* helpers (which test == ""); the os.Stat below is what actually
// validates that the processed file exists — confirm intent before changing.
if err != nil || subFile != "" {
if !strings.Contains(subFile, "already exists") {
_, subFileExists := os.Stat(subFile)
if subFileExists != nil {
err = fmt.Errorf("ERROR creating the subscription yaml %s, %v", subFile, err)
return err
}
}
}
msg, err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", subFile).Output()
if err != nil || msg == "" {
err = fmt.Errorf("ERROR applying subscription %v: %v, %v", subFile, msg, err)
return err
}
}
// Wait for the subscription/CSV/controller pod to be fully ready.
_, err = subscriptionIsFinished(oc, sub)
return err
}
// ensureFeatureGateIsApplied applies the feature-gates manifest from
// featureGatesFile, but only when the osc-feature-gates ConfigMap is not
// already present in the subscription's namespace. An "already exists"
// apply result is tolerated; any other apply failure is wrapped and returned.
func ensureFeatureGateIsApplied(oc *exutil.CLI, sub SubscriptionDescription, featureGatesFile string) (err error) {
	getOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm", "osc-feature-gates", "-n", sub.namespace, "--no-headers").Output()
	if !strings.Contains(getOutput, "Error from server (NotFound)") {
		// ConfigMap already exists (or lookup gave a different result); nothing to apply.
		return err
	}
	applyOutput, applyErr := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", featureGatesFile).Output()
	err = applyErr
	if applyErr != nil && !strings.Contains(applyOutput, "already exists exit") {
		err = fmt.Errorf("featureGates cm issue %v %v", applyOutput, applyErr)
	}
	return err
}
// ensureTrusteeKbsServiceRouteExists makes sure a route of routeType (e.g.
// "edge") named routeName exists for the trustee KBS service, creating it on
// the service's "kbs-port" when "oc get route" reports NotFound.
// NOTE(review): if the create call returns an unexpected message, the
// function falls through and returns the create error — which may be nil even
// though the route was not confirmed; verify callers tolerate that.
func ensureTrusteeKbsServiceRouteExists(oc *exutil.CLI, namespace, routeType, routeName string) (err error) {
var (
msg string
)
msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("route", routeName, "-n", namespace, "--no-headers").Output()
// Route already present: nothing to do.
if err == nil && strings.Contains(msg, routeName) {
return nil
}
if strings.Contains(msg, "(NotFound)") {
msg, err = oc.AsAdmin().WithoutNamespace().Run("create").Args("route", routeType, "--service="+routeName, "--port", "kbs-port", "-n", namespace).Output()
// Created now, or created concurrently by another test — both are success.
if strings.Contains(msg, "route.route.openshift.io/"+routeName+" created") || strings.Contains(msg, "(AlreadyExists)") {
return nil
}
}
return err
}
// ensureTrusteeUrlReturnIsValid launches a kbs-client pod (in the "default"
// namespace) and uses it to fetch default/kbsres1/key1 from the trustee at
// trusteeUrl, verifying the KBS answers with the expected secret value.
// The kbs-client pod is always cleaned up via defer.
// NOTE(review): the correctAnswer parameter is unused — the expected value is
// the hard-coded "cmVzMXZhbDE=" (base64 of "res1val1") below; confirm before
// removing the parameter or replacing the literal.
func ensureTrusteeUrlReturnIsValid(oc *exutil.CLI, kbsClientTemplate, trusteeUrl, correctAnswer, trusteeNamespace string) (err error) {
var (
podName = "kbs-client"
kbsClientImage = "quay.io/confidential-containers/kbs-client:v0.9.0"
phase = "Running"
outputFromOc string
namespace = "default"
)
// make sure the trustee deployment pod is ready
trusteeDeploymentPod, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", trusteeNamespace,
"-l", "app=kbs", "-o=jsonpath={.items[0].metadata.name}").Output()
outputFromOc, err = checkResourceJsonpath(oc, "pod", trusteeDeploymentPod, trusteeNamespace, "-o=jsonpath={.status.phase}", phase, podSnooze*time.Second, 10*time.Second)
if outputFromOc == "" || err != nil {
return fmt.Errorf("Could not get pod (%v) status %v: %v %v", trusteeDeploymentPod, phase, outputFromOc, err)
}
// Render the kbs-client pod manifest from the template.
kbsClientFile, err := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f",
kbsClientTemplate, "-p", "NAME="+podName, "IMAGE="+kbsClientImage).OutputToFile(getRandomString() + "kbsClientFile.json")
if kbsClientFile == "" {
return fmt.Errorf("Did not get a filename when processing %v: err:%v", kbsClientTemplate, err)
}
defer deleteKataResource(oc, "pod", namespace, podName)
outputFromOc, err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", kbsClientFile, "-n", namespace).Output()
if err != nil {
// Non-fatal: the pod may already exist; readiness is checked next.
e2e.Logf("WARNING: creating kbs-client %v err: %v", outputFromOc, err)
}
outputFromOc, err = checkResourceJsonpath(oc, "pod", podName, namespace, "-o=jsonpath={.status.phase}", phase, podSnooze*time.Second, 10*time.Second)
if err != nil {
return fmt.Errorf("Could not get pod (%v) status %v: %v err: %v", podName, phase, outputFromOc, err)
}
// Query the KBS from inside the client pod.
kbsAnswer, err := oc.AsAdmin().Run("rsh").Args("-T", "-n", namespace,
podName, "kbs-client", "--url", trusteeUrl, "get-resource", "--path", "default/kbsres1/key1").Output()
if err != nil || kbsAnswer != "cmVzMXZhbDE=" {
return fmt.Errorf("Could not query trustee at %v. %v err %v", trusteeUrl, kbsAnswer, err)
}
return err
}
// ensureTrusteeIsInstalled performs the full trustee install sequence —
// namespace, operatorgroup, subscription, and the kbs-service edge route —
// stopping at the first failure. On success it returns the route's hostname;
// an empty hostname is reported as an error.
func ensureTrusteeIsInstalled(oc *exutil.CLI, subscription SubscriptionDescription, namespaceTemplate, ogTemplate, subTemplate string) (trusteeRouteHost string, err error) {
	// Run the prerequisite install steps in order; bail out on the first error.
	setupSteps := []func() error{
		func() error { return ensureNamespaceIsInstalled(oc, subscription.namespace, namespaceTemplate) },
		func() error { return ensureOperatorGroupIsInstalled(oc, subscription.namespace, ogTemplate) },
		func() error { return ensureOperatorIsSubscribed(oc, subscription, subTemplate) },
	}
	for _, step := range setupSteps {
		if err = step(); err != nil {
			return trusteeRouteHost, err
		}
	}
	trusteeRouteName := "kbs-service"
	if err = ensureTrusteeKbsServiceRouteExists(oc, subscription.namespace, "edge", trusteeRouteName); err != nil {
		return trusteeRouteHost, err
	}
	// Read back the hostname the route was assigned.
	trusteeRouteHost, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("route", trusteeRouteName, "-o=jsonpath={.spec.host}", "-n", subscription.namespace).Output()
	if trusteeRouteHost == "" {
		err = fmt.Errorf("trusteeRouteHost was empty. err %v", err)
	}
	return trusteeRouteHost, err
}
// ensureConfigmapIsApplied applies the given configmap manifest in the
// namespace. An "already exists" result is tolerated; any other apply
// failure is wrapped with the file name and oc output.
func ensureConfigmapIsApplied(oc *exutil.CLI, namespace, configmapFile string) (err error) {
	applyOutput, applyErr := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", configmapFile, "-n", namespace).Output()
	if applyErr == nil || strings.Contains(applyOutput, "already exists exit") {
		return applyErr
	}
	return fmt.Errorf("configmap %v file issue %v %v", configmapFile, applyOutput, applyErr)
}
// ensureKataconfigIsCreated creates the KataConfig CR (rendered from
// kataconf.template) and waits until its install finishes. If a finished
// kataconfig already exists it returns immediately. Installation reboots
// worker nodes, so callers must be [Disruptive][Serial][Slow].
func ensureKataconfigIsCreated(oc *exutil.CLI, kataconf KataconfigDescription, sub SubscriptionDescription) (msg string, err error) {
// If this is used, label the caller with [Disruptive][Serial][Slow]
// If kataconfig already exists, this must not error
var (
configFile string
)
_, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("kataconfig", kataconf.name, "--no-headers", "-n", sub.namespace).Output()
if err == nil {
// kataconfig exists. Is it finished?
// InProgress=="false" means the earlier install completed.
msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("kataconfig", kataconf.name, "-n", sub.namespace, kataconfigStatusQuery).Output()
if strings.ToLower(msg) == "false" {
g.By("(3) kataconfig is previously installed")
return msg, err // no need to go through the rest
}
}
g.By("(3) Make sure subscription has finished before kataconfig")
msg, err = subscriptionIsFinished(oc, sub)
if err != nil {
e2e.Logf("The subscription has not finished: %v %v", msg, err)
}
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(msg).NotTo(o.BeEmpty())
g.By("(3.1) Create kataconfig file")
configFile, err = oc.AsAdmin().WithoutNamespace().Run("process").Args("--ignore-unknown-parameters=true", "-f", kataconf.template,
"-p", "NAME="+kataconf.name, "LOGLEVEL="+kataconf.logLevel, "PEERPODS="+strconv.FormatBool(kataconf.enablePeerPods), "ELIGIBILITY="+strconv.FormatBool(kataconf.eligibility),
"-n", sub.namespace).OutputToFile(getRandomString() + "kataconfig-common.json")
if err != nil || configFile == "" {
_, configFileExists := os.Stat(configFile)
if configFileExists != nil {
e2e.Logf("issue creating kataconfig file is %s, %v", configFile, err)
}
}
// Logged for debugging: the webhook service must exist before the CR is applied.
msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "controller-manager-service", "-n", sub.namespace).Output()
e2e.Logf("Controller-manager-service: %v %v", msg, err)
g.By("(3.2) Apply kataconfig file")
// -o=jsonpath={.status.installationStatus.IsInProgress} "" at this point
msg, err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", configFile).Output()
if err != nil {
e2e.Logf("Error: applying kataconfig %v failed: %v %v", configFile, msg, err)
}
// If it is already applied by a parallel test there will be an err
g.By("(3.3) Check kataconfig creation has started")
_, _ = checkResourceExists(oc, "kataconfig", kataconf.name, sub.namespace, snooze*time.Second, 10*time.Second)
g.By("(3.4) Wait for kataconfig to finish install")
// Installing/deleting kataconfig reboots nodes. AWS BM takes 20 minutes/node
msg, err = waitForKataconfig(oc, kataconf.name, sub.namespace)
return msg, err
}
// createKataPodAnnotated renders the annotated pod template with a random
// name plus the MEMORY/CPU/INSTANCESIZE values from annotations, then
// launches it and waits for the Running phase via createKataPodFromTemplate.
// Returns the generated pod name and any creation error.
func createKataPodAnnotated(oc *exutil.CLI, podNs, template, basePodName, runtimeClassName, workloadImage string, annotations map[string]string) (msg string, err error) {
	podName := getRandomString() + basePodName
	templateArgs := []string{
		"--ignore-unknown-parameters=true", "-f", template,
		"-p", "NAME=" + podName,
		"-p", "MEMORY=" + annotations["MEMORY"],
		"-p", "CPU=" + annotations["CPU"],
		"-p", "INSTANCESIZE=" + annotations["INSTANCESIZE"],
		"-p", "RUNTIMECLASSNAME=" + runtimeClassName,
		"IMAGE=" + workloadImage,
	}
	renderedFile, renderErr := oc.AsAdmin().Run("process").Args(templateArgs...).OutputToFile(getRandomString() + "Pod-common.json")
	o.Expect(renderErr).NotTo(o.HaveOccurred())
	return createKataPodFromTemplate(oc, podNs, podName, renderedFile, runtimeClassName, "Running")
}
// createKataPodFromTemplate applies an already-rendered pod manifest, waits
// until the pod reaches the given phase, and verifies its runtimeClassName.
// On success it returns the pod name; on failure it returns the offending oc
// output plus a wrapped error (a wrong runtime class still returns the name).
func createKataPodFromTemplate(oc *exutil.CLI, podNs, newPodName, configFile, runtimeClassName, phase string) (msg string, err error) {
	applyOut, applyErr := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", configFile, "-n", podNs).Output()
	if applyOut == "" || applyErr != nil {
		return applyOut, fmt.Errorf("Could not apply configFile %v: %v %v", configFile, applyOut, applyErr)
	}
	g.By(fmt.Sprintf("Checking if pod %v is ready", newPodName))
	statusOut, statusErr := checkResourceJsonpath(oc, "pod", newPodName, podNs, "-o=jsonpath={.status.phase}", phase, podSnooze*time.Second, 10*time.Second)
	if statusOut == "" || statusErr != nil {
		return statusOut, fmt.Errorf("Could not get pod (%v) status %v: %v %v", newPodName, phase, statusOut, statusErr)
	}
	// Confirm the pod really runs under the expected runtime class.
	runtimeOut, runtimeErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", newPodName, "-n", podNs, "-o=jsonpath={.spec.runtimeClassName}").Output()
	err = runtimeErr
	if runtimeOut != runtimeClassName || runtimeErr != nil {
		err = fmt.Errorf("pod %v has wrong runtime %v, expecting %v %v", newPodName, runtimeOut, runtimeClassName, runtimeErr)
	}
	return newPodName, err
}
// author: [email protected]
func createKataPod(oc *exutil.CLI, podNs, commonPod, basePodName, runtimeClassName, workloadImage string) string {
var (
err error
newPodName string
configFile string
phase = "Running"
)
newPodName = getRandomString() + basePodName
configFile, err = oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", commonPod, "-p",
"NAME="+newPodName, "-p", "RUNTIMECLASSNAME="+runtimeClassName, "-p", "IMAGE="+workloadImage).OutputToFile(getRandomString() + "Pod-common.json")
o.Expect(err).NotTo(o.HaveOccurred())
podname, err := createKataPodFromTemplate(oc, podNs, newPodName, configFile, runtimeClassName, phase)
o.Expect(err).NotTo(o.HaveOccurred())
return podname
}
// deleteKataResource deletes the named resource in resNs, waiting up to
// podSnooze seconds for it to go away, and reports success as a bool.
func deleteKataResource(oc *exutil.CLI, res, resNs, resName string) bool {
	_, err := deleteResource(oc, res, resName, resNs, podSnooze*time.Second, 10*time.Second)
	return err == nil
}
// getRandomString returns an 8-character string of lowercase letters and
// digits, used to give generated files and resources unique names.
func getRandomString() string {
	const alphabet = "abcdefghijklmnopqrstuvwxyz0123456789"
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	out := make([]byte, 8)
	for i := 0; i < len(out); i++ {
		out[i] = alphabet[rng.Intn(len(alphabet))]
	}
	return string(out)
}
// deleteKataConfig deletes the named kataconfig and polls (every 30s, up to
// kataSnooze seconds) until "oc get kataconfig" reports no resources.
// Deletion reboots worker nodes, so callers must be [Disruptive][Serial].
func deleteKataConfig(oc *exutil.CLI, kcName string) (msg string, err error) {
g.By("(4.1) Trigger kataconfig deletion")
msg, err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("kataconfig", kcName).Output()
if err != nil || msg == "" {
e2e.Logf("Unexpected error while trying to delete kataconfig: %v\nerror: %v", msg, err)
}
//SNO could become unavailable while restarting
//o.Expect(err).NotTo(o.HaveOccurred())
g.By("(4.2) Wait for kataconfig to be deleted")
errCheck := wait.Poll(30*time.Second, kataSnooze*time.Second, func() (bool, error) {
msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("kataconfig").Output()
if strings.Contains(msg, "No resources found") {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("kataconfig %v did not get deleted: %v %v", kcName, msg, err))
g.By("(4.3) kataconfig is gone")
return msg, err
}
// this function doesn't care to create kataconfig if it doesn't exist
// checkKataconfigIsCreated verifies (without creating anything) that the
// subscription is AtLatestKnown, its installed CSV succeeded, and the named
// kataconfig reports InProgress=="false". Returns nil only when all hold.
func checkKataconfigIsCreated(oc *exutil.CLI, sub SubscriptionDescription, kcName string) (err error) {
jsonSubStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", sub.subName, "-n", sub.namespace, "-o=jsonpath={.status}").Output()
if err != nil || gjson.Get(jsonSubStatus, "state").String() != "AtLatestKnown" {
return fmt.Errorf("issue with subscription or state isn't expected: %v, actual: %v error: %v", "AtLatestKnown", jsonSubStatus, err)
}
// NOTE(review): this assumes the CSV name contains the subscription name —
// true for this operator's naming scheme; confirm if reused elsewhere.
if !strings.Contains(gjson.Get(jsonSubStatus, "installedCSV").String(), sub.subName) {
return fmt.Errorf("Error: get installedCSV for subscription %v %v", jsonSubStatus, err)
}
csvName := gjson.Get(jsonSubStatus, "installedCSV").String()
jsonCsvStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("csv", csvName, "-n", sub.namespace, "-o=jsonpath={.status}").Output()
if err != nil ||
gjson.Get(jsonCsvStatus, "phase").String() != "Succeeded" ||
gjson.Get(jsonCsvStatus, "reason").String() != "InstallSucceeded" {
return fmt.Errorf("Error: CSV %v in wrong state, expected: %v actual:\n%v %v", csvName, "InstallSucceeded", jsonCsvStatus, err)
}
// check kataconfig
// InProgress=="false" means the kataconfig install finished.
msg, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("kataconfig", kcName, "-n", sub.namespace, kataconfigStatusQuery).Output()
if err == nil && strings.ToLower(msg) == "false" {
return nil
}
return fmt.Errorf("Error: Kataconfig in wrong state, expected: false actual: %v error: %v", msg, err)
}
// subscriptionIsFinished waits for the operator install to complete: the sub
// reaches AtLatestKnown, its CSV reports Succeeded/InstallSucceeded, and a
// controller-manager pod exists with all containers ready. Returns the final
// "oc get sub" output. Asserts (fails the spec) on timeouts.
func subscriptionIsFinished(oc *exutil.CLI, sub SubscriptionDescription) (msg string, err error) {
var (
csvName string
controlPod string
)
g.By("(2) Subscription checking")
msg, _ = checkResourceJsonpath(oc, "sub", sub.subName, sub.namespace, "-o=jsonpath={.status.state}", "AtLatestKnown", snooze*time.Second, 10*time.Second)
csvName, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", sub.subName, "-n", sub.namespace, "-o=jsonpath={.status.installedCSV}").Output()
if err != nil || csvName == "" {
e2e.Logf("ERROR: cannot get sub %v installedCSV %v %v", sub.subName, csvName, err)
}
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(csvName).NotTo(o.BeEmpty())
g.By("(2.1) Check that the csv '" + csvName + "' has finished")
msg, err = checkResourceJsonpath(oc, "csv", csvName, sub.namespace, "-o=jsonpath={.status.phase}{.status.reason}", "SucceededInstallSucceeded", snooze*time.Second, 10*time.Second)
// need controller-manager-service and controller-manager-* pod running before kataconfig
// oc get pod -o=jsonpath={.items..metadata.name} && find one w/ controller-manager
g.By("(2.2) Wait for controller manager pod to start")
// checkResourceJsonpath() needs exact pod name. control-manager deploy does not have full name
errCheck := wait.PollImmediate(10*time.Second, podSnooze*time.Second, func() (bool, error) {
msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items..metadata.name}", "-n", sub.namespace).Output()
if strings.Contains(msg, "controller-manager") {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Controller manger pods did not start %v %v", msg, err))
// what is the pod name?
// Pick the first pod whose name contains "controller-manager".
for _, controlPod = range strings.Fields(msg) {
if strings.Contains(controlPod, "controller-manager") {
break // no need to check the rest
}
}
// controller-podname -o=jsonpath={.status.containerStatuses} && !strings.Contains("false")
g.By("(2.3) Check that " + controlPod + " is ready")
// this checks that the 2 containers in the pod are not showing false. checkResourceJsonpath() cannot be used
errCheck = wait.PollImmediate(10*time.Second, podSnooze*time.Second, func() (bool, error) {
msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", controlPod, "-o=jsonpath={.status.containerStatuses}", "-n", sub.namespace).Output()
if !strings.Contains(strings.ToLower(msg), "false") {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("control pod %v did not become ready: %v %v", controlPod, msg, err))
msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", sub.subName, "-n", sub.namespace, "--no-headers").Output()
return msg, err
}
// author: [email protected]
// waitForNodesInDebug polls every worker node's crio config until all of them
// report `log_level = "debug"`, failing the spec on timeout. Returns a
// success summary message and a nil error.
//
// Fix: the original had a redundant `o.Expect(msg).To(o.ContainSubstring(...))`
// inside a branch already guarded by the identical strings.Contains check, so
// the assertion could never fail; it has been removed with no behavior change.
func waitForNodesInDebug(oc *exutil.CLI, opNamespace string) (msg string, err error) {
	count := 0
	workerNodeList, err := exutil.GetClusterNodesBy(oc, "worker")
	o.Expect(err).NotTo(o.HaveOccurred())
	workerNodeCount := len(workerNodeList)
	if workerNodeCount < 1 {
		e2e.Logf("Error: no worker nodes: %v, %v", workerNodeList, err)
	}
	o.Expect(workerNodeList).NotTo(o.BeEmpty())
	// loop all workers until they all have debug
	errCheck := wait.Poll(10*time.Second, snooze*time.Second, func() (bool, error) {
		count = 0
		for index := range workerNodeList {
			msg, err = oc.AsAdmin().Run("debug").Args("-n", opNamespace, "node/"+workerNodeList[index], "--", "chroot", "/host", "crio", "config").Output()
			if strings.Contains(msg, "log_level = \"debug") {
				count++
			}
		}
		return count == workerNodeCount, nil
	})
	exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Error: only %v of %v total worker nodes are in debug: %v\n %v", count, workerNodeCount, workerNodeList, msg))
	msg = fmt.Sprintf("All %v worker nodes are in debug mode: %v", workerNodeCount, workerNodeList)
	return msg, nil
}
// author: [email protected]
// applyImageRedirect applies the redirect manifest (ICSP/IDMS) in redirectFile
// and waits until the named resource shows up, returning any error.
func applyImageRedirect(oc *exutil.CLI, redirectFile, redirectType, redirectName string) error {
	output, applyErr := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", redirectFile).Output()
	if applyErr != nil {
		return fmt.Errorf("ERROR applying %v: %v %v", redirectType, output, applyErr)
	}
	_, checkErr := checkResourceExists(oc, redirectType, redirectName, "default", 360*time.Second, 10*time.Second)
	return checkErr
}
// waitForDeployment blocks until the deployment's .status.readyReplicas
// equals its .spec.replicas, polling every 10s for up to 300s. On timeout it
// re-fetches the whole .status and extracts readyReplicas for the failure
// message. Returns the last readyReplicas string observed and its error.
func waitForDeployment(oc *exutil.CLI, podNs, deployName string) (msg string, err error) {
	var (
		snooze   time.Duration = 300 // local: intentionally shadows the package-level snooze
		replicas string
	)
	replicas, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deploy", "-n", podNs, deployName, "-o=jsonpath={.spec.replicas}").Output()
	if err != nil {
		e2e.Logf("replica fetch failed %v %v", replicas, err)
	}
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(replicas).NotTo(o.BeEmpty())
	errCheck := wait.Poll(10*time.Second, snooze*time.Second, func() (bool, error) {
		msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deploy", "-n", podNs, deployName, "-o=jsonpath={.status.readyReplicas}").Output()
		if msg == replicas {
			return true, nil
		}
		return false, nil
	})
	if errCheck != nil {
		// timeout path: readyReplicas may be absent from .status, so pull the
		// full status and extract the field for a useful failure message
		msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deploy", "-n", podNs, deployName, "-o=jsonpath={.status}").Output()
		e2e.Logf("timed out %v != %v %v", replicas, msg, err)
		msg = gjson.Get(msg, "readyReplicas").String()
	}
	exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Deployment has %v replicas, not %v %v", replicas, msg, err))
	return msg, err
}
// deleteDeployment removes the named deployment via the generic kata resource
// deleter and reports whether the deletion succeeded.
func deleteDeployment(oc *exutil.CLI, deployNs, deployName string) bool {
	deleted := deleteKataResource(oc, "deploy", deployNs, deployName)
	return deleted
}
// getClusterVersion returns the full OCP version string plus its major and
// minor components (minor both as string and int).
//
// Fix: if `oc version` fails or returns an unexpected string, the original
// only logged the problem and then indexed sa[1], panicking with "index out
// of range" when the version had no "." (e.g. empty string splits to [""]).
// A guard now returns zero values ("" / 0) instead of panicking.
func getClusterVersion(oc *exutil.CLI) (clusterVersion, ocpMajorVer, ocpMinorVer string, minorVer int) {
	jsonVersion, err := oc.AsAdmin().WithoutNamespace().Run("version").Args("-o", "json").Output()
	if err != nil || jsonVersion == "" || !gjson.Get(jsonVersion, "openshiftVersion").Exists() {
		e2e.Logf("Error: could not get oc version: %v %v", jsonVersion, err)
	}
	clusterVersion = gjson.Get(jsonVersion, "openshiftVersion").String()
	sa := strings.Split(clusterVersion, ".")
	if len(sa) < 2 {
		e2e.Logf("Error: unexpected cluster version format: %q", clusterVersion)
		return clusterVersion, ocpMajorVer, ocpMinorVer, minorVer
	}
	ocpMajorVer = sa[0]
	ocpMinorVer = sa[1]
	minorVer, _ = strconv.Atoi(ocpMinorVer)
	return clusterVersion, ocpMajorVer, ocpMinorVer, minorVer
}
// waitForKataconfig polls the kataconfig's in-progress status until it reads
// "false" (install finished), failing the spec on timeout. Returns a success
// message containing the final `oc get kataconfig` line.
func waitForKataconfig(oc *exutil.CLI, kcName, opNamespace string) (msg string, err error) {
	// Installing/deleting kataconfig reboots nodes. AWS BM takes 20 minutes/node
	pollErr := wait.Poll(30*time.Second, kataSnooze*time.Second, func() (bool, error) {
		msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("kataconfig", kcName, "-n", opNamespace, kataconfigStatusQuery).Output()
		return strings.ToLower(msg) == "false", nil
	})
	exutil.AssertWaitPollNoErr(pollErr, fmt.Sprintf("kataconfig %v did not finish install", kcName))
	msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("kataconfig", kcName, "--no-headers").Output()
	msg = "SUCCESS kataconfig is created " + msg
	return msg, err
}
// changeSubscriptionCatalog merge-patches the subscription's .spec.source to
// the catalog source named in testrun.
func changeSubscriptionCatalog(oc *exutil.CLI, subscription SubscriptionDescription, testrun TestRunDescription) (msg string, err error) {
	// check for catsrc existence before calling
	mergePatch := fmt.Sprintf("{\"spec\":{\"source\":\"%v\"}}", testrun.catalogSourceName)
	return oc.AsAdmin().WithoutNamespace().Run("patch").Args("sub", subscription.subName, "--type", "merge", "-p", mergePatch, "-n", subscription.namespace).Output()
}
// changeSubscriptionChannel merge-patches the subscription's .spec.channel to
// the channel named in testrun.
func changeSubscriptionChannel(oc *exutil.CLI, subscription SubscriptionDescription, testrun TestRunDescription) (msg string, err error) {
	mergePatch := fmt.Sprintf("{\"spec\":{\"channel\":\"%v\"}}", testrun.channel)
	return oc.AsAdmin().WithoutNamespace().Run("patch").Args("sub", subscription.subName, "--type", "merge", "-p", mergePatch, "-n", subscription.namespace).Output()
}
// logErrorAndFail logs the message/error pair, then fails the spec if err is
// non-nil or msg is empty (in that order). The oc parameter is unused here
// but kept so all helpers share the same call shape.
func logErrorAndFail(oc *exutil.CLI, logMsg, msg string, err error) {
	e2e.Logf("%v: %v %v", logMsg, msg, err)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(msg).NotTo(o.BeEmpty())
}
// checkAndLabelCustomNodes applies customLabel to worker nodes unless some
// nodes already carry it. With testrun.labelSingleNode only the first worker
// is labeled; otherwise every node matching workerLabel is.
func checkAndLabelCustomNodes(oc *exutil.CLI, testrun TestRunDescription) {
	e2e.Logf("check and label nodes (or single node for custom label)")
	alreadyLabeled := exutil.GetNodeListByLabel(oc, customLabel)
	if len(alreadyLabeled) > 0 {
		e2e.Logf("labeled nodes found %v", alreadyLabeled)
		return
	}
	if testrun.labelSingleNode {
		firstWorker, err := exutil.GetFirstWorkerNode(oc)
		o.Expect(err).NotTo(o.HaveOccurred())
		LabelNode(oc, firstWorker, customLabel)
		return
	}
	labelSelectedNodes(oc, workerLabel, customLabel)
}
// labelEligibleNodes applies featureLabel for the eligibility feature: to the
// first worker only when testrun.eligibleSingleNode is set, otherwise to all
// nodes matching workerLabel.
func labelEligibleNodes(oc *exutil.CLI, testrun TestRunDescription) {
	e2e.Logf("Label worker nodes for eligibility feature")
	if !testrun.eligibleSingleNode {
		labelSelectedNodes(oc, workerLabel, featureLabel)
		return
	}
	firstWorker, err := exutil.GetFirstWorkerNode(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	LabelNode(oc, firstWorker, featureLabel)
}
// labelSelectedNodes applies customLabel to every node matching selectorLabel.
func labelSelectedNodes(oc *exutil.CLI, selectorLabel, customLabel string) {
	// ranging over an empty list is a no-op, so no explicit length guard needed
	for _, nodeName := range exutil.GetNodeListByLabel(oc, selectorLabel) {
		LabelNode(oc, nodeName, customLabel)
	}
}
// LabelNode applies customLabel to the given node via `oc label` and fails
// the spec if the command errors.
func LabelNode(oc *exutil.CLI, node, customLabel string) {
	output, labelErr := oc.AsAdmin().WithoutNamespace().Run("label").Args("node", node, customLabel).Output()
	e2e.Logf("%v applied and output was: %v %v", customLabel, output, labelErr)
	o.Expect(labelErr).NotTo(o.HaveOccurred())
}
// getInstancesOnNode counts VM (uuid-bearing) processes on a node by running
// ps in a debug shell. Returns 0 (with the parse error) when the output is
// not a number.
//
// Fix: the command was built with fmt.Sprintf on a constant format string and
// no arguments (gosimple S1039 / vet finding) — replaced with a plain const.
func getInstancesOnNode(oc *exutil.CLI, opNamespace, node string) (instances int, err error) {
	const cmd = "ps -ef | grep uuid | grep -v grep | wc -l"
	msg, err := exutil.DebugNodeWithOptionsAndChroot(oc, node, []string{"-q"}, "bin/sh", "-c", cmd)
	o.Expect(err).NotTo(o.HaveOccurred())
	instances, err = strconv.Atoi(strings.TrimSpace(msg))
	if err != nil {
		instances = 0
	}
	return instances, err
}
// getTotalInstancesOnNodes sums the per-node VM instance counts over nodeList,
// logging each node's count and the grand total.
func getTotalInstancesOnNodes(oc *exutil.CLI, opNamespace string, nodeList []string) (total int) {
	for _, nodeName := range nodeList {
		instances, _ := getInstancesOnNode(oc, opNamespace, nodeName)
		e2e.Logf("found %v VMs on node %v", instances, nodeName)
		total += instances
	}
	e2e.Logf("Total %v VMs on all nodes", total)
	return total
}
// getAllKataNodes lists node names carrying featureLabel when eligibility is
// enabled, otherwise those carrying customLabel. opNamespace is unused but
// kept for call-shape parity with sibling helpers.
func getAllKataNodes(oc *exutil.CLI, eligibility bool, opNamespace, featureLabel, customLabel string) (nodeNameList []string) {
	if eligibility {
		return exutil.GetNodeListByLabel(oc, featureLabel)
	}
	return exutil.GetNodeListByLabel(oc, customLabel)
}
// getHttpResponse GETs url and returns the response body when the status code
// equals expStatusCode; otherwise it returns an error describing the mismatch.
//
// Fix: the original declared `body, err := io.ReadAll(...)`, shadowing the
// outer err — a read failure was silently dropped and the caller received
// ("", nil). The read error is now propagated.
func getHttpResponse(url string, expStatusCode int) (resp string, err error) {
	res, err := http.Get(url)
	if err != nil {
		return "", err
	}
	defer res.Body.Close()
	if res.StatusCode != expStatusCode {
		return "", fmt.Errorf("Response from url=%v\n actual status code=%d doesn't match expected %d\n", url, res.StatusCode, expStatusCode)
	}
	body, err := io.ReadAll(res.Body)
	if err != nil {
		return "", err
	}
	return string(body), nil
}
// create a service and route for the deployment, both with the same name as deployment itself
// require defer deleteRouteAndService to cleanup
// createServiceAndRoute exposes deployName as a service and then as a route
// (both named after the deployment) and returns the route's host with any
// surrounding single quotes stripped.
// require defer deleteRouteAndService to cleanup
func createServiceAndRoute(oc *exutil.CLI, deployName, podNs string) (host string, err error) {
	out, err := oc.WithoutNamespace().Run("expose").Args("deployment", deployName, "-n", podNs).Output()
	if err != nil {
		e2e.Logf("Expose deployment failed with: %v %v", out, err)
		return host, err
	}
	out, err = oc.Run("expose").Args("service", deployName, "-n", podNs).Output()
	if err != nil {
		e2e.Logf("Expose service failed with: %v %v", out, err)
		return host, err
	}
	host, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("routes", deployName, "-n", podNs, "-o=jsonpath={.spec.host}").Output()
	if err != nil || host == "" {
		e2e.Logf("Failed to get host from route, actual host=%v\n error %v", host, err)
	}
	host = strings.Trim(host, "'")
	return host, err
}
// cleanup for createServiceAndRoute func
// deleteRouteAndService removes the service and route created by
// createServiceAndRoute (both share the deployment's name), ignoring errors.
func deleteRouteAndService(oc *exutil.CLI, deployName, podNs string) {
	for _, resType := range []string{"svc", "route"} {
		_, _ = deleteResource(oc, resType, deployName, podNs, podSnooze*time.Second, 10*time.Second)
	}
}
// checkPeerPodSecrets verifies that the peer-pods secret for the given cloud
// provider exists and contains non-empty values for every provider-required
// key. On success it returns ("", nil); otherwise msg describes the problem
// and err wraps the same text.
func checkPeerPodSecrets(oc *exutil.CLI, opNamespace, provider string, ppSecretName string) (msg string, err error) {
	var requiredKeys []string
	switch provider {
	case "azure":
		requiredKeys = []string{"AZURE_CLIENT_ID", "AZURE_CLIENT_SECRET", "AZURE_SUBSCRIPTION_ID", "AZURE_TENANT_ID"}
	case "aws":
		requiredKeys = []string{"AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"}
	case "libvirt":
		requiredKeys = []string{"LIBVIRT_URI", "LIBVIRT_POOL", "LIBVIRT_VOL_NAME"}
	default:
		msg = fmt.Sprintf("Cloud provider %v is not supported", provider)
		return msg, fmt.Errorf("%v", msg)
	}
	jsonData, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secrets", ppSecretName, "-n", opNamespace, "-o=jsonpath={.data}").Output()
	if err != nil {
		msg = fmt.Sprintf("Secret for %v not exists", provider)
		return msg, fmt.Errorf("%v", msg)
	}
	var missing []string
	for _, key := range requiredKeys {
		if !gjson.Get(jsonData, key).Exists() || gjson.Get(jsonData, key).String() == "" {
			missing = append(missing, key)
		}
	}
	if len(missing) > 0 {
		msg = fmt.Sprintf("ERROR missing vars in secret %v %v", len(missing), missing)
		return msg, fmt.Errorf("%v", msg)
	}
	return "", nil
}
// decodeSecret base64-decodes input and returns the plaintext. On failure
// msg carries a human-readable description and err the decode error.
//
// Fix: the success path used fmt.Sprintf("%s", bytes) for a plain byte-slice
// conversion (gosimple S1025); replaced with a direct string conversion.
func decodeSecret(input string) (msg string, err error) {
	debase64, err := base64.StdEncoding.DecodeString(input)
	if err != nil {
		msg = fmt.Sprintf("Was not able to decode %v. %v %v", input, debase64, err)
		return msg, err
	}
	return string(debase64), nil
}
// checkPeerPodConfigMap verifies that the peer-pods configmap for the given
// cloud provider exists and contains non-empty values for every
// provider-required key. On success it returns ("", nil); otherwise msg
// describes the problem and err wraps the same text.
func checkPeerPodConfigMap(oc *exutil.CLI, opNamespace, provider, ppConfigMapName string) (msg string, err error) {
	var requiredKeys []string
	switch provider {
	case "azure":
		requiredKeys = []string{"CLOUD_PROVIDER", "AZURE_NSG_ID", "AZURE_SUBNET_ID", "VXLAN_PORT", "AZURE_REGION", "AZURE_RESOURCE_GROUP"}
	case "aws":
		requiredKeys = []string{"CLOUD_PROVIDER", "AWS_REGION", "AWS_SG_IDS", "AWS_SUBNET_ID", "AWS_VPC_ID", "VXLAN_PORT"}
	case "libvirt":
		requiredKeys = []string{"CLOUD_PROVIDER"}
	default:
		msg = fmt.Sprintf("Cloud provider %v is not supported", provider)
		return msg, fmt.Errorf("%v", msg)
	}
	jsonData, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm", ppConfigMapName, "-n", opNamespace, "-o=jsonpath={.data}").Output()
	if err != nil {
		msg = fmt.Sprintf("Configmap for %v not exists", provider)
		return msg, fmt.Errorf("%v", msg)
	}
	var missing []string
	for _, key := range requiredKeys {
		if !gjson.Get(jsonData, key).Exists() || gjson.Get(jsonData, key).String() == "" {
			missing = append(missing, key)
		}
	}
	if len(missing) > 0 {
		msg = fmt.Sprintf("ERROR missing vars in configmap %v %v", len(missing), missing)
		return msg, fmt.Errorf("%v", msg)
	}
	return "", nil
}
// checkPeerPodControl verifies the peer-pod control plane in opNamespace:
// waits for the two peer-pods-webhook pods and the peerpodconfig-ctrl-caa
// daemon pods, checks each pod against expStatus (e.g. "Running"), and then
// verifies the peer-pods-webhook-svc service has endpoints.
func checkPeerPodControl(oc *exutil.CLI, opNamespace, expStatus string) (msg string, err error) {
	// This would check peer pod webhook pod , peerpodconfig-ctrl-caa pods , webhook service and endpoints attached to the svc
	//TODO: should add podvm image builder pod completed?
	var (
		peerpodconfigCtrlCaaPods []string
		webhookPods              []string
		webhooksvc               = "peer-pods-webhook-svc"
	)
	g.By("Check for peer pods webhook pod")
	// checkResourceJsonpath needs a pod name
	errCheck := wait.PollImmediate(10*time.Second, podSnooze*time.Second, func() (bool, error) {
		// NOTE(review): `:=` declares closure-local msg/err here (unlike the
		// later polls), so the outer msg is still empty in the check below —
		// confirm this shadowing is intended.
		msg, err := oc.AsAdmin().Run("get").Args("pod", "-o=jsonpath={.items..metadata.name}", "-n", opNamespace).Output()
		if err != nil {
			return false, err
		}
		if strings.Contains(msg, "peer-pods-webhook") {
			return true, nil
		}
		return false, nil
	})
	if err != nil || msg == "" || errCheck != nil {
		e2e.Logf(" %v %v, %v", msg, err, errCheck)
	}
	exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("peer pod webhook pod did not start: %v", errCheck))
	//webhook pod names
	msg, err = oc.AsAdmin().Run("get").Args("pod", "-o=jsonpath={.items..metadata.name}", "-n", opNamespace).Output()
	for _, whPod := range strings.Fields(msg) {
		if strings.Contains(whPod, "peer-pods-webhook") {
			webhookPods = append(webhookPods, whPod)
		}
	}
	//count check
	whPodCount := len(webhookPods)
	if whPodCount != 2 {
		e2e.Logf("There should be two webhook pods, instead there are: %v", whPodCount)
		// bare return: callers receive the zero values ("", nil) on this path
		return
	}
	//pod state check
	for _, podName := range webhookPods {
		checkControlPod(oc, podName, opNamespace, expStatus)
	}
	g.By("Check for peer pods ctrl caa pod")
	// checkResourceJsonpath needs a podname
	errCheck = wait.PollImmediate(10*time.Second, podSnooze*time.Second, func() (bool, error) {
		msg, err = oc.AsAdmin().Run("get").Args("pod", "-o=jsonpath={.items..metadata.name}", "-n", opNamespace).Output()
		if strings.Contains(msg, "peerpodconfig-ctrl-caa-daemon") {
			return true, nil
		}
		return false, nil
	})
	if err != nil || msg == "" || errCheck != nil {
		e2e.Logf(" %v %v, %v", msg, err, errCheck)
	}
	exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("peer pod ctrl caa pod did not start %v %v", msg, err))
	//peerpodconfig ctrl CAA pod names
	msg, err = oc.AsAdmin().Run("get").Args("pod", "-o=jsonpath={.items..metadata.name}", "-n", opNamespace).Output()
	for _, ppconfigCaaPod := range strings.Fields(msg) {
		if strings.Contains(ppconfigCaaPod, "peerpodconfig-ctrl-caa") {
			peerpodconfigCtrlCaaPods = append(peerpodconfigCtrlCaaPods, ppconfigCaaPod)
		}
	}
	//pod state check
	for _, podName := range peerpodconfigCtrlCaaPods {
		checkControlPod(oc, podName, opNamespace, expStatus)
	}
	//webhook service
	checkControlSvc(oc, opNamespace, webhooksvc)
	g.By("SUCCESS - peerpod config check passed")
	return msg, err
}
// checkControlPod waits until the pod's .status.phase matches expStatus.
func checkControlPod(oc *exutil.CLI, podName, podNs, expStatus string) (msg string, err error) {
	return checkResourceJsonpath(oc, "pods", podName, podNs, "-o=jsonpath={.status.phase}", expStatus, podSnooze*time.Second, 10*time.Second)
}
// checkControlSvc waits for the named service to exist and then for it to
// have at least one endpoint IP attached, failing the spec on timeout.
func checkControlSvc(oc *exutil.CLI, svcNs, svcName string) (msg string, err error) {
	g.By("Check for " + svcName + "service")
	msg, err = checkResourceJsonpath(oc, "service", svcName, svcNs, "-o=jsonpath={.metadata.name}", svcName, podSnooze*time.Second, 10*time.Second)
	g.By("Check for " + svcName + "service endpoints")
	// checkResourceJsonpath does strings.Contains not ContainsAny, so poll by hand:
	// any digit in the endpoint-IP list means an address is attached
	pollErr := wait.PollImmediate(10*time.Second, podSnooze*time.Second, func() (bool, error) {
		msg, err = oc.AsAdmin().Run("get").Args("ep", svcName, "-n", svcNs, "-o=jsonpath={.subsets..addresses..ip}").Output()
		return strings.ContainsAny(msg, "0123456789"), nil
	})
	if pollErr != nil || err != nil || msg == "" {
		e2e.Logf(" %v %v, %v", msg, err, pollErr)
	}
	exutil.AssertWaitPollNoErr(pollErr, fmt.Sprintf("%v does not have endpoints attached to it; err: %v", svcName, err))
	g.By("SUCCESS - service check passed")
	return msg, err
}
// checkResourceExists polls `oc get <resType> <resName>` until the output
// mentions resName, failing the spec on timeout. Known-working resource
// types: pod, deploy, service, route, ep, ds. Always returns a nil error.
func checkResourceExists(oc *exutil.CLI, resType, resName, resNs string, duration, interval time.Duration) (msg string, err error) {
	pollErr := wait.PollImmediate(interval, duration, func() (bool, error) {
		msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(resType, resName, "-n", resNs, "--no-headers").Output()
		return strings.Contains(msg, resName), nil
	})
	exutil.AssertWaitPollNoErr(pollErr, fmt.Sprintf("%v %v was not found in ns %v after %v sec: %v %v", resType, resName, resNs, duration, msg, err))
	return msg, nil
}
// checkResourceJsonpath polls `oc get <resType> <resName>` with the supplied
// jsonpath until the output contains expected, failing the spec on timeout.
// Always returns the last output and a nil error.
//
// Typical usage:
//   pod    -o=jsonpath={.status.phase}                                        expected "Running"
//   deploy -o=jsonpath={.status.conditions[?(@.type=="Available")].status}    expected "True"
//   route  -o=jsonpath={.status.ingress..conditions[?(@.type=="Admitted")].status} expected "True"
//   ds     expected = count of kata-oc-labeled nodes, e.g.
//          fmt.Sprintf("%v", len(exutil.GetNodeListByLabel(oc, kataocLabel)))
// Note: .status.readyReplicas may be absent from a deploy's status, so it is
// a poor jsonpath target here.
func checkResourceJsonpath(oc *exutil.CLI, resType, resName, resNs, jsonpath, expected string, duration, interval time.Duration) (msg string, err error) {
	pollErr := wait.PollImmediate(interval, duration, func() (bool, error) {
		msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(resType, resName, "-n", resNs, jsonpath).Output()
		return strings.Contains(msg, expected), nil
	})
	exutil.AssertWaitPollNoErr(pollErr, fmt.Sprintf("%v %v in ns %v is not in %v state after %v sec: %v %v", resType, resName, resNs, expected, duration, msg, err))
	return msg, nil
}
// deleteResource issues `oc delete --ignore-not-found` for the resource and
// then polls until `oc get` reports "not found", failing the spec on timeout.
// Always returns a summary message and a nil error.
//
// Fix: e2e.Failf was called with msg as the format string (go vet printf:
// non-constant format string); it would misformat if msg ever contained '%'.
// Now uses an explicit "%v" verb.
func deleteResource(oc *exutil.CLI, res, resName, resNs string, duration, interval time.Duration) (msg string, err error) {
	msg, err = oc.AsAdmin().WithoutNamespace().Run("delete").Args(res, resName, "-n", resNs, "--ignore-not-found").Output()
	if err != nil {
		msg = fmt.Sprintf("ERROR: Cannot start deleting %v %v -n %v: %v %v", res, resName, resNs, msg, err)
		e2e.Failf("%v", msg)
	}
	// make sure it doesn't exist
	errCheck := wait.PollImmediate(interval, duration, func() (bool, error) {
		msg, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args(res, resName, "-n", resNs, "--no-headers").Output()
		if strings.Contains(msg, "not found") {
			return true, nil
		}
		return false, nil
	})
	if errCheck != nil {
		e2e.Logf("ERROR: Timeout waiting for delete to finish on %v %v -n %v: %v", res, resName, resNs, msg)
	}
	exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("%v %v was not finally deleted in ns %v", res, resName, resNs))
	msg = fmt.Sprintf("deleted %v %v -n %v: %v %v", res, resName, resNs, msg, err)
	err = nil
	return msg, err
}
// createApplyPeerPodSecrets creates and applies the peer-pods secret for the
// given cloud provider unless it already exists. Source material comes from
// the CI-provided "peerpods-param-cm" configmap and "peerpods-param-secret"
// secret in the default namespace; the rendered secret manifest is written to
// secretTemplate, applied, then removed from disk.
// Note: the ppParam parameter is shadowed by the locally parsed value below.
func createApplyPeerPodSecrets(oc *exutil.CLI, provider string, ppParam PeerpodParam, opNamespace, ppSecretName, secretTemplate string) (msg string, err error) {
	var (
		ciCmName     = "peerpods-param-cm"
		ciSecretName = "peerpods-param-secret"
	)
	// Check if the secrets already exist
	g.By("Checking if peer-pods-secret exists")
	msg, err = checkPeerPodSecrets(oc, opNamespace, provider, ppSecretName)
	if err == nil && msg == "" {
		e2e.Logf("peer-pods-secret exists - skipping creating it")
		return msg, err
	}
	// e2e.Logf("**** peer-pods-secret not found on the cluster - proceeding to create it****")
	//Read params from peerpods-param-cm and store in ppParam struct
	msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", ciCmName, "-n", "default").Output()
	if err != nil {
		e2e.Logf("%v Configmap created by QE CI not found: msg %v err: %v", ciCmName, msg, err)
	} else {
		configmapData, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", ciCmName, "-n", "default", "-o=jsonpath={.data}").Output()
		if err != nil {
			e2e.Failf("%v Configmap created by QE CI has error, no .data: %v %v", ciCmName, configmapData, err)
		}
		e2e.Logf("configmap Data is:\n%v", configmapData)
		// note: := shadows both the ppParam parameter and the outer err
		ppParam, err := parseCIPpConfigMapData(provider, configmapData)
		if err != nil {
			return msg, err
		}
		// dispatch to the provider-specific secret-file builder
		var secretFilePath string
		if provider == "aws" {
			secretFilePath, err = createAWSPeerPodSecrets(oc, ppParam, ciSecretName, secretTemplate)
		} else if provider == "azure" {
			secretFilePath, err = createAzurePeerPodSecrets(oc, ppParam, ciSecretName, secretTemplate)
		} else if provider == "libvirt" {
			secretFilePath, err = createLibvirtPeerPodSecrets(oc, ppParam, ciSecretName, secretTemplate)
		} else {
			msg = fmt.Sprintf("Cloud provider %v is not supported", provider)
			return msg, fmt.Errorf("%v", msg)
		}
		if err != nil {
			return msg, err
		}
		g.By("(Apply peer-pods-secret file)")
		msg, err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", secretFilePath).Output()
		if err != nil {
			e2e.Logf("Error: applying peer-pods-secret %v failed: %v %v", secretFilePath, msg, err)
		}
		// best-effort cleanup of the rendered secret file on disk
		if errRemove := os.Remove(secretFilePath); errRemove != nil {
			e2e.Logf("Error: removing secret file %v failed: %v", secretFilePath, errRemove)
		}
	}
	return msg, err
}
// createApplyPeerPodsParamLibvirtConfigMap creates and applies the
// libvirt-podvm-image-cm configmap from the CI-provided peerpods-param-cm
// when it does not already exist. Only the libvirt provider is supported.
// Note: the ppParam parameter is shadowed by the locally parsed value below.
//
// Fix: the original `if err == nil { ...; return } else if err != nil { ... }`
// carried a redundant second condition — err is necessarily non-nil after the
// first branch returns. Replaced with an early return and a plain log.
func createApplyPeerPodsParamLibvirtConfigMap(oc *exutil.CLI, provider string, ppParam PeerpodParam, opNamespace, ppConfigMapName, ppConfigMapTemplate string) (msg string, err error) {
	var (
		ciCmName   = "peerpods-param-cm"
		configFile string
	)
	g.By("Checking if libvirt-podvm-image-cm exists")
	_, err = checkPeerPodConfigMap(oc, opNamespace, provider, ppConfigMapName)
	if err == nil {
		e2e.Logf("libvirt-podvm-image-cm exists - skipping creating it")
		return msg, err
	}
	e2e.Logf("**** libvirt-podvm-image-cm not found on the cluster - proceeding to create it****")
	// Read params from libvirt-podvm-image-cm and store in ppParam struct
	msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", ciCmName, "-n", "default").Output()
	if err != nil {
		e2e.Logf("%v Configmap created by QE CI not found: msg %v err: %v", ciCmName, msg, err)
	} else {
		configmapData, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", ciCmName, "-n", "default", "-o=jsonpath={.data}").Output()
		if err != nil {
			e2e.Failf("%v Configmap created by QE CI has error, no .data: %v %v", ciCmName, configmapData, err)
		}
		// note: := shadows both the ppParam parameter and the outer err
		ppParam, err := parseCIPpConfigMapData(provider, configmapData)
		if err != nil {
			return msg, err
		}
		// Create libvirt-podvm-image-cm file
		if provider == "libvirt" {
			configFile, err = createLibvirtPeerPodsParamConfigMap(oc, ppParam, ppConfigMapTemplate)
		} else {
			msg = fmt.Sprintf("Cloud provider %v is not supported", provider)
			return msg, fmt.Errorf("%v", msg)
		}
		if err != nil {
			return msg, err
		}
		// Apply libvirt-podvm-image-cm file
		g.By("(Apply libvirt-podvm-image-cm file)")
		msg, err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", configFile).Output()
		if err != nil {
			return fmt.Sprintf("Error: applying libvirt-podvm-image-cm %v failed: %v %v", configFile, msg, err), err
		}
	}
	return msg, err
}
// parseCIPpConfigMapData dispatches configmap parsing to the provider-specific
// parser, returning an error for unknown providers.
func parseCIPpConfigMapData(provider, configmapData string) (PeerpodParam, error) {
	switch provider {
	case "aws":
		return parseAWSCIConfigMapData(configmapData)
	case "azure":
		return parseAzureCIConfigMapData(configmapData)
	case "libvirt":
		return parseLibvirtCIConfigMapData(configmapData)
	}
	var empty PeerpodParam
	return empty, fmt.Errorf("Cloud provider %v is not supported", provider)
}
// parseLibvirtCIConfigMapData extracts the libvirt peer-pod parameters from
// the CI configmap's JSON data. A field is only set when its key exists in
// the JSON. Never returns an error (signature kept for parity with the other
// provider parsers).
//
// Fix: fourteen identical Exists()/Get() blocks collapsed into one
// data-driven loop over a key -> destination-field table.
func parseLibvirtCIConfigMapData(configmapData string) (PeerpodParam, error) {
	var ppParam PeerpodParam
	// CI configmap key -> destination field (note: some keys differ from the
	// field names, e.g. PODVM_DISTRO -> LIBVIRT_PODVM_DISTRO)
	fields := map[string]*string{
		"PROXY_TIMEOUT":                &ppParam.PROXY_TIMEOUT,
		"LIBVIRT_KVM_HOST_ADDRESS":     &ppParam.LIBVIRT_KVM_HOST_ADDRESS,
		"PODVM_DISTRO":                 &ppParam.LIBVIRT_PODVM_DISTRO,
		"CAA_SRC":                      &ppParam.LIBVIRT_CAA_SRC,
		"CAA_REF":                      &ppParam.LIBVIRT_CAA_REF,
		"DOWNLOAD_SOURCES":             &ppParam.LIBVIRT_DOWNLOAD_SOURCES,
		"CONFIDENTIAL_COMPUTE_ENABLED": &ppParam.LIBVIRT_CONFIDENTIAL_COMPUTE_ENABLED,
		"UPDATE_PEERPODS_CM":           &ppParam.LIBVIRT_UPDATE_PEERPODS_CM,
		"ORG_ID":                       &ppParam.LIBVIRT_ORG_ID,
		"BASE_OS_VERSION":              &ppParam.LIBVIRT_BASE_OS_VERSION,
		"IMAGE_NAME":                   &ppParam.LIBVIRT_IMAGE_NAME,
		"PODVM_TAG":                    &ppParam.LIBVIRT_PODVM_TAG,
		"SE_BOOT":                      &ppParam.LIBVIRT_SE_BOOT,
		"PODVM_IMAGE_URI":              &ppParam.LIBVIRT_PODVM_IMAGE_URI,
	}
	for key, dst := range fields {
		if value := gjson.Get(configmapData, key); value.Exists() {
			*dst = value.String()
		}
	}
	return ppParam, nil
}
// parseAWSCIConfigMapData extracts the AWS peer-pod parameters from the CI
// configmap's JSON data. A field is only set when its key exists in the JSON.
// Never returns an error (signature kept for parity with the other parsers).
//
// Fix: repeated Exists()/Get() blocks collapsed into one data-driven loop
// over a key -> destination-field table.
func parseAWSCIConfigMapData(configmapData string) (PeerpodParam, error) {
	var ppParam PeerpodParam
	fields := map[string]*string{
		"AWS_REGION":          &ppParam.AWS_REGION,
		"AWS_SUBNET_ID":       &ppParam.AWS_SUBNET_ID,
		"AWS_VPC_ID":          &ppParam.AWS_VPC_ID,
		"AWS_SG_IDS":          &ppParam.AWS_SG_IDS,
		"VXLAN_PORT":          &ppParam.VXLAN_PORT,
		"PODVM_INSTANCE_TYPE": &ppParam.PODVM_INSTANCE_TYPE,
		"PROXY_TIMEOUT":       &ppParam.PROXY_TIMEOUT,
	}
	for key, dst := range fields {
		if value := gjson.Get(configmapData, key); value.Exists() {
			*dst = value.String()
		}
	}
	return ppParam, nil
}
// parseAzureCIConfigMapData extracts the Azure peer-pod parameters from the
// CI configmap's JSON data. A field is only set when its key exists in the
// JSON. Never returns an error (signature kept for parity with the other
// parsers).
//
// Fix: repeated Exists()/Get() blocks collapsed into one data-driven loop
// over a key -> destination-field table.
func parseAzureCIConfigMapData(configmapData string) (PeerpodParam, error) {
	var ppParam PeerpodParam
	fields := map[string]*string{
		"AZURE_REGION":         &ppParam.AZURE_REGION,
		"AZURE_RESOURCE_GROUP": &ppParam.AZURE_RESOURCE_GROUP,
		"VXLAN_PORT":           &ppParam.VXLAN_PORT,
		"AZURE_INSTANCE_SIZE":  &ppParam.AZURE_INSTANCE_SIZE,
		"AZURE_SUBNET_ID":      &ppParam.AZURE_SUBNET_ID,
		"AZURE_NSG_ID":         &ppParam.AZURE_NSG_ID,
		"PROXY_TIMEOUT":        &ppParam.PROXY_TIMEOUT,
	}
	for key, dst := range fields {
		if value := gjson.Get(configmapData, key); value.Exists() {
			*dst = value.String()
		}
	}
	return ppParam, nil
}
// createLibvirtPeerPodSecrets reads the CI-provided secret (ciSecretName in
// the default namespace), base64-decodes the libvirt credential fields,
// renders a peer-pods-secret manifest as JSON, writes it to secretTemplate,
// and returns that file path. The ppParam argument is currently unused.
func createLibvirtPeerPodSecrets(oc *exutil.CLI, ppParam PeerpodParam, ciSecretName, secretTemplate string) (string, error) {
	var (
		secretString string
	)
	secretString, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", ciSecretName, "-n", "default", "-o=jsonpath={.data}").Output()
	if err != nil || secretString == "" {
		e2e.Logf("Error: %v CI provided peer pods secret data empty", err)
		return "", err
	}
	var (
		LIBVIRT_URI          string
		LIBVIRT_POOL         string
		LIBVIRT_VOL_NAME     string
		ACTIVATION_KEY       string
		REDHAT_OFFLINE_TOKEN string
		HOST_KEY_CERTS       string
	)
	// map each JSON field name to the local variable that receives its
	// base64-decoded value
	fields := map[string]*string{
		"LIBVIRT_URI":          &LIBVIRT_URI,
		"LIBVIRT_POOL":         &LIBVIRT_POOL,
		"LIBVIRT_VOL_NAME":     &LIBVIRT_VOL_NAME,
		"ACTIVATION_KEY":       &ACTIVATION_KEY,
		"REDHAT_OFFLINE_TOKEN": &REDHAT_OFFLINE_TOKEN,
		"HOST_KEY_CERTS":       &HOST_KEY_CERTS,
	}
	for key, valuePtr := range fields {
		encodedValue := gjson.Get(secretString, key).String()
		if encodedValue == "" {
			// missing fields are tolerated here; the required ones are
			// validated after the loop
			e2e.Logf("Warning: %v field is empty", key)
			continue
		}
		decodedValue, err := decodeSecret(encodedValue)
		if err != nil {
			e2e.Logf("Error decoding %v: %v", key, err)
			return "", err
		}
		*valuePtr = decodedValue
	}
	// Check for libvirt credentials
	// (HOST_KEY_CERTS is intentionally not required here)
	if LIBVIRT_POOL == "" || LIBVIRT_URI == "" || LIBVIRT_VOL_NAME == "" || REDHAT_OFFLINE_TOKEN == "" || ACTIVATION_KEY == "" {
		msg := "Libvirt credentials not found in the data."
		// NOTE(review): on this path the first return value is a message, not
		// a file path — confirm callers handle it (see createApplyPeerPodSecrets).
		return msg, fmt.Errorf("Libvirt credentials not found")
	}
	// Construct the secretJSON for Libvirt
	secretJSON := map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "Secret",
		"type":       "Opaque",
		"metadata": map[string]string{
			"name":      "peer-pods-secret",
			"namespace": "openshift-sandboxed-containers-operator",
		},
		"stringData": map[string]string{
			"CLOUD_PROVIDER":       "libvirt",
			"LIBVIRT_URI":          LIBVIRT_URI,
			"LIBVIRT_POOL":         LIBVIRT_POOL,
			"LIBVIRT_VOL_NAME":     LIBVIRT_VOL_NAME,
			"REDHAT_OFFLINE_TOKEN": REDHAT_OFFLINE_TOKEN,
			"ACTIVATION_KEY":       ACTIVATION_KEY,
			"HOST_KEY_CERTS":       HOST_KEY_CERTS,
		},
	}
	// Marshal the JSON to a string
	secretJSONString, err := json.Marshal(secretJSON)
	if err != nil {
		return "", err
	}
	// Write the JSON string to the secretTemplate file
	err = os.WriteFile(secretTemplate, []byte(secretJSONString), 0644)
	if err != nil {
		return "", err
	}
	return secretTemplate, nil
}
// createAWSPeerPodSecrets reads the CI-provided secret's .data.aws field,
// base64-decodes it, parses the aws_access_key_id / aws_secret_access_key
// lines (credentials-file style "key = value"), renders a peer-pods-secret
// manifest as JSON, writes it to secretTemplate, and returns that file path.
// The ppParam argument is currently unused.
func createAWSPeerPodSecrets(oc *exutil.CLI, ppParam PeerpodParam, ciSecretName, secretTemplate string) (string, error) {
	var (
		secretString  string
		decodedString string
		lines         []string
	)
	// Read peerpods-param-secret to fetch the keys
	secretString, err := oc.AsAdmin().Run("get").Args("secret", ciSecretName, "-n", "default", "-o=jsonpath={.data.aws}").Output()
	if err != nil || secretString == "" {
		e2e.Logf("Error: %v CI provided peer pods secret data empty", err)
		return "", err
	}
	decodedString, err = decodeSecret(secretString)
	if err != nil {
		return "", err
	}
	// parse "key = value" lines from the decoded AWS credentials blob
	lines = strings.Split(decodedString, "\n")
	accessKey := ""
	secretKey := ""
	for _, line := range lines {
		parts := strings.Split(line, "=")
		if len(parts) == 2 {
			key := strings.TrimSpace(parts[0])
			value := strings.TrimSpace(parts[1])
			if key == "aws_access_key_id" {
				accessKey = value
			} else if key == "aws_secret_access_key" {
				secretKey = value
			}
		}
	}
	// Check for AWS credentials
	if accessKey == "" || secretKey == "" {
		msg := "AWS credentials not found in the data."
		// NOTE(review): on this path the first return value is a message, not
		// a file path — confirm callers handle it (see createApplyPeerPodSecrets).
		return msg, fmt.Errorf("AWS credentials not found")
	}
	// create AWS specific secret file logic here
	// Construct the secretJSON for AWS
	secretJSON := map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "Secret",
		"type":       "Opaque",
		"metadata": map[string]string{
			"name":      "peer-pods-secret",
			"namespace": "openshift-sandboxed-containers-operator",
		},
		"stringData": map[string]string{
			"AWS_ACCESS_KEY_ID":     accessKey,
			"AWS_SECRET_ACCESS_KEY": secretKey,
		},
	}
	// Marshal the JSON to a string
	secretJSONString, err := json.Marshal(secretJSON)
	if err != nil {
		return "", err
	}
	// Write the JSON string to the secretTemplate file
	err = os.WriteFile(secretTemplate, []byte(secretJSONString), 0644)
	if err != nil {
		return "", err
	}
	return secretTemplate, nil
}
// createAzurePeerPodSecrets builds the peer-pods-secret manifest for Azure from
// the CI-provided secret (ciSecretName in the "default" namespace, .data.azure
// key) and writes it to secretTemplate.
// Returns the path of the written file, or a short message plus an error.
func createAzurePeerPodSecrets(oc *exutil.CLI, ppParam PeerpodParam, ciSecretName, secretTemplate string) (string, error) {
	// Read peerpods-param-secret to fetch the keys
	secretString, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", ciSecretName, "-n", "default", "-o=jsonpath={.data.azure}").Output()
	if err != nil || secretString == "" {
		e2e.Logf("Error: %v CI provided peer pods secret data empty", err)
		return "", err
	}
	decodedString, err := decodeSecret(secretString)
	if err != nil {
		e2e.Logf("Error: %v CI provided peer pods secret data can't be decoded", err)
		return "", err
	}
	// Hoist the gjson lookups; every one of the four keys must exist and be non-empty.
	subscriptionID := gjson.Get(decodedString, "subscriptionId")
	clientID := gjson.Get(decodedString, "clientId")
	clientSecret := gjson.Get(decodedString, "clientSecret")
	tenantID := gjson.Get(decodedString, "tenantId")
	allPresent := subscriptionID.Exists() && clientID.Exists() && clientSecret.Exists() && tenantID.Exists()
	if !allPresent || subscriptionID.String() == "" || clientID.String() == "" ||
		clientSecret.String() == "" || tenantID.String() == "" {
		msg := "Azure credentials not found or partial in the data."
		return msg, fmt.Errorf("Azure credentials not found")
	}
	// Construct the secret manifest for Azure
	secretJSON := map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "Secret",
		"type":       "Opaque",
		"metadata": map[string]string{
			"name":      "peer-pods-secret",
			"namespace": "openshift-sandboxed-containers-operator",
		},
		"stringData": map[string]string{
			"AZURE_CLIENT_ID":       clientID.String(),
			"AZURE_CLIENT_SECRET":   clientSecret.String(),
			"AZURE_TENANT_ID":       tenantID.String(),
			"AZURE_SUBSCRIPTION_ID": subscriptionID.String(),
		},
	}
	// Serialize and write the manifest to the secretTemplate file
	secretJSONString, err := json.Marshal(secretJSON)
	if err != nil {
		return "", err
	}
	if err = os.WriteFile(secretTemplate, []byte(secretJSONString), 0644); err != nil {
		return "", err
	}
	return secretTemplate, nil
}
// Get the cloud provider type of the test environment copied from test/extended/storage/utils.
// A platform of "none" is reported as "libvirt"; the result is always lowercase.
func getCloudProvider(oc *exutil.CLI) string {
	var provider string
	pollErr := wait.PollImmediate(5*time.Second, 30*time.Second, func() (bool, error) {
		out, getErr := oc.WithoutNamespace().AsAdmin().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.type}").Output()
		if getErr != nil {
			e2e.Logf("Get cloudProvider *failed with* :\"%v\",wait 5 seconds retry.", getErr)
			return false, getErr
		}
		provider = strings.ToLower(out)
		if provider == "none" {
			// platform "None" means a libvirt-backed cluster for these tests
			provider = "libvirt"
		}
		e2e.Logf("The test cluster cloudProvider is :\"%s\".", strings.ToLower(provider))
		return true, nil
	})
	exutil.AssertWaitPollNoErr(pollErr, "Waiting for get cloudProvider timeout")
	return strings.ToLower(provider)
}
// createRWOfilePVC is a convenience wrapper around createPVC for the most
// common case: a ReadWriteOnce, Filesystem-mode claim.
// author: [email protected]
//
// Parameters: the namespace, the claim name, and the capacity in Gigs.
// Returns the error from createPVC.
func createRWOfilePVC(oc *exutil.CLI, opNamespace, pvcName, capacity string) (err error) {
	return createPVC(oc, opNamespace, pvcName, capacity, "Filesystem", "ReadWriteOnce")
}
// createPVC renders the storage pvc-template for the current cloud platform and
// applies it. volumeMode is Filesystem or Block; accessMode is ReadWriteOnce,
// ReadOnlyMany or ReadWriteMany; capacity is an integer number of Gigs.
func createPVC(oc *exutil.CLI, opNamespace, pvcName, capacity, volumeMode, accessMode string) (err error) {
	// just single Storage class per platform, block will be supported later?
	const jsonCsiClass = `{"azure":{"Filesystem":"azurefile-csi","Block":"managed-csi"},
		"gcp":{"Filesystem":"standard-csi","Block":"standard-csi"},
		"aws":{"Filesystem":"gp3-csi","Block":"gp3-csi"}}`
	cloudPlatform := getCloudProvider(oc)
	scName := gjson.Get(jsonCsiClass, cloudPlatform+`.`+volumeMode).String()
	pvcTemplate := filepath.Join(exutil.FixturePath("testdata", "storage"), "pvc-template.yaml")
	// capacity must parse as an integer
	if _, err = strconv.Atoi(capacity); err != nil {
		return err
	}
	g.By("Create pvc from template")
	pvcFile, err := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", pvcTemplate,
		"-p", "SCNAME="+scName, "-p", "PVCNAME="+pvcName, "-p", "PVCNAMESPACE="+opNamespace,
		"-p", "ACCESSMODE="+accessMode, "-p", "VOLUMEMODE="+volumeMode, "-p", "PVCCAPACITY="+capacity).OutputToFile(getRandomString() + "pvc-default.json")
	if err != nil {
		e2e.Logf("Could not create pvc %v %v", pvcFile, err)
	}
	o.Expect(err).NotTo(o.HaveOccurred())
	g.By("Applying pvc " + pvcFile)
	applyOut, err := oc.AsAdmin().Run("apply").Args("-f", pvcFile, "-n", opNamespace).Output()
	if err != nil {
		e2e.Logf("Could not apply pvc %v %v", applyOut, err)
	}
	o.Expect(err).NotTo(o.HaveOccurred())
	e2e.Logf("pvc apply output: %v", applyOut)
	return err
}
// createApplyPeerPodConfigMap reads the configmap the CI applied
// ("peerpods-param-cm" in the "default" namespace), renders the
// provider-specific "peer-pods-cm" from it and applies it on the cluster.
// If peer-pods-cm already exists it only verifies the configmap is in the
// expected pre-kataconfig state (no pod VM image ID yet) and skips creation.
func createApplyPeerPodConfigMap(oc *exutil.CLI, provider string, ppParam PeerpodParam, opNamespace, ppConfigMapName, ppConfigMapTemplate string) (msg string, err error) {
	var (
		ciCmName   = "peerpods-param-cm"
		configFile string
		imageID    string
	)
	g.By("Checking if peer-pods-cm exists")
	_, err = checkPeerPodConfigMap(oc, opNamespace, provider, ppConfigMapName)
	if err == nil {
		// peer-pods-cm already exists; before kataconfig install it must NOT
		// contain a pod VM image ID yet
		msg, err, imageID = CheckPodVMImageID(oc, ppConfigMapName, provider, opNamespace)
		o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("%v imageID: %v err: %v", msg, imageID, err))
		if imageID == "" {
			e2e.Logf("peer-pods-cm in the right state - does not have the IMAGE ID before the kataconfig install , msg: %v", msg)
		} else {
			e2e.Logf("IMAGE ID: %v", imageID)
			msgIfErr := fmt.Sprintf("ERROR: peer-pods-cm has the Image ID before the kataconfig is installed, incorrect state: %v %v %v", imageID, msg, err)
			// BUG FIX: the original asserted NotTo(BeEmpty()), which always
			// passed in this branch; a non-empty image ID at this point is the
			// error condition described in msgIfErr and must fail the test.
			o.Expect(imageID).To(o.BeEmpty(), msgIfErr)
		}
		e2e.Logf("peer-pods-cm exists - skipping creating it")
		return msg, err
	}
	e2e.Logf("**** peer-pods-cm not found on the cluster - proceeding to create it****")
	// Read params from peerpods-param-cm and store in ppParam struct
	msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", ciCmName, "-n", "default").Output()
	if err != nil {
		e2e.Logf("%v Configmap created by QE CI not found: msg %v err: %v", ciCmName, msg, err)
	} else {
		configmapData, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", ciCmName, "-n", "default", "-o=jsonpath={.data}").Output()
		if err != nil {
			e2e.Failf("%v Configmap created by QE CI has error, no .data: %v %v", ciCmName, configmapData, err)
		}
		e2e.Logf("configmap Data is:\n%v", configmapData)
		// deliberately shadows the ppParam parameter: the CI configmap is the
		// source of truth for the parameters
		ppParam, err := parseCIPpConfigMapData(provider, configmapData)
		if err != nil {
			return msg, err
		}
		// Render the provider-specific peer-pods-cm manifest file
		switch provider {
		case "aws":
			configFile, err = createAWSPeerPodsConfigMap(oc, ppParam, ppConfigMapTemplate)
		case "azure":
			configFile, err = createAzurePeerPodsConfigMap(oc, ppParam, ppConfigMapTemplate)
		case "libvirt":
			configFile, err = createLibvirtPeerPodsConfigMap(oc, ppParam, ppConfigMapTemplate)
		default:
			msg = fmt.Sprintf("Cloud provider %v is not supported", provider)
			return msg, fmt.Errorf("%v", msg)
		}
		if err != nil {
			return msg, err
		}
		// Apply peer-pods-cm file
		g.By("(Apply peer-pods-cm file)")
		msg, err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", configFile).Output()
		if err != nil {
			return fmt.Sprintf("Error: applying peer-pods-cm %v failed: %v %v", configFile, msg, err), err
		}
	}
	return msg, err
}
// createAWSPeerPodsConfigMap renders the AWS peer-pods configmap template into
// "<randomstring>peer-pods-cm.json" and returns the generated file's path.
func createAWSPeerPodsConfigMap(oc *exutil.CLI, ppParam PeerpodParam, ppConfigMapTemplate string) (string, error) {
	g.By("Create peer-pods-cm file")
	processArgs := []string{"--ignore-unknown-parameters=true", "-f", ppConfigMapTemplate,
		"-p", "VXLAN_PORT=" + ppParam.VXLAN_PORT, "PODVM_INSTANCE_TYPE=" + ppParam.PODVM_INSTANCE_TYPE,
		"PROXY_TIMEOUT=" + ppParam.PROXY_TIMEOUT, "AWS_REGION=" + ppParam.AWS_REGION,
		"AWS_SUBNET_ID=" + ppParam.AWS_SUBNET_ID, "AWS_VPC_ID=" + ppParam.AWS_VPC_ID,
		"AWS_SG_IDS=" + ppParam.AWS_SG_IDS}
	configFile, err := oc.AsAdmin().Run("process").Args(processArgs...).OutputToFile(getRandomString() + "peer-pods-cm.json")
	if configFile != "" {
		// confirm the rendered file actually landed on disk
		if fileInfo, statErr := os.Stat(configFile); statErr != nil {
			e2e.Logf("issue creating peer-pods-cm file %s, err: %v , osStatMsg: %v", configFile, err, fileInfo)
		}
	}
	return configFile, err
}
// createAzurePeerPodsConfigMap renders the Azure peer-pods configmap template
// into "<randomstring>peer-pods-cm.json" and returns the generated file's path.
func createAzurePeerPodsConfigMap(oc *exutil.CLI, ppParam PeerpodParam, ppConfigMapTemplate string) (string, error) {
	g.By("Create peer-pods-cm file")
	processArgs := []string{"--ignore-unknown-parameters=true", "-f", ppConfigMapTemplate,
		"-p", "VXLAN_PORT=" + ppParam.VXLAN_PORT, "AZURE_INSTANCE_SIZE=" + ppParam.AZURE_INSTANCE_SIZE,
		"AZURE_SUBNET_ID=" + ppParam.AZURE_SUBNET_ID, "AZURE_NSG_ID=" + ppParam.AZURE_NSG_ID,
		"PROXY_TIMEOUT=" + ppParam.PROXY_TIMEOUT, "AZURE_REGION=" + ppParam.AZURE_REGION,
		"AZURE_RESOURCE_GROUP=" + ppParam.AZURE_RESOURCE_GROUP}
	configFile, err := oc.AsAdmin().Run("process").Args(processArgs...).OutputToFile(getRandomString() + "peer-pods-cm.json")
	if configFile != "" {
		// confirm the rendered file actually landed on disk
		if fileInfo, statErr := os.Stat(configFile); statErr != nil {
			e2e.Logf("issue creating peer-pods-cm file %s, err: %v , osStatMsg: %v", configFile, err, fileInfo)
		}
	}
	return configFile, err
}
// createLibvirtPeerPodsConfigMap renders the libvirt peer-pods configmap
// template into "<randomstring>peer-pods-cm.json" and returns the file's path.
// Only PROXY_TIMEOUT is parameterized for libvirt.
func createLibvirtPeerPodsConfigMap(oc *exutil.CLI, ppParam PeerpodParam, ppConfigMapTemplate string) (string, error) {
	g.By("Create peer-pods-cm file")
	configFile, err := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", ppConfigMapTemplate,
		"-p", "PROXY_TIMEOUT="+ppParam.PROXY_TIMEOUT).OutputToFile(getRandomString() + "peer-pods-cm.json")
	if configFile != "" {
		// confirm the rendered file actually landed on disk
		if fileInfo, statErr := os.Stat(configFile); statErr != nil {
			e2e.Logf("issue creating peer-pods-cm file %s, err: %v , osStatMsg: %v", configFile, err, fileInfo)
		}
	}
	return configFile, err
}
// createLibvirtPeerPodsParamConfigMap renders the libvirt podvm-image configmap
// template into "<randomstring>peerpods-param-cm.json" and returns the file's path.
func createLibvirtPeerPodsParamConfigMap(oc *exutil.CLI, ppParam PeerpodParam, ppConfigMapTemplate string) (string, error) {
	g.By("Create libvirt-podvm-image-cm file")
	processArgs := []string{"--ignore-unknown-parameters=true", "-f", ppConfigMapTemplate,
		"-p", "PODVM_DISTRO=" + ppParam.LIBVIRT_PODVM_DISTRO,
		"CAA_SRC=" + ppParam.LIBVIRT_CAA_SRC,
		"CAA_REF=" + ppParam.LIBVIRT_CAA_REF,
		"DOWNLOAD_SOURCES=" + ppParam.LIBVIRT_DOWNLOAD_SOURCES,
		"CONFIDENTIAL_COMPUTE_ENABLED=" + ppParam.LIBVIRT_CONFIDENTIAL_COMPUTE_ENABLED,
		"UPDATE_PEERPODS_CM=" + ppParam.LIBVIRT_UPDATE_PEERPODS_CM,
		"ORG_ID=" + ppParam.LIBVIRT_ORG_ID,
		"BASE_OS_VERSION=" + ppParam.LIBVIRT_BASE_OS_VERSION,
		"IMAGE_NAME=" + ppParam.LIBVIRT_IMAGE_NAME,
		"PODVM_TAG=" + ppParam.LIBVIRT_PODVM_TAG,
		"SE_BOOT=" + ppParam.LIBVIRT_SE_BOOT,
		"PODVM_IMAGE_URI=" + ppParam.LIBVIRT_PODVM_IMAGE_URI}
	configFile, err := oc.AsAdmin().Run("process").Args(processArgs...).OutputToFile(getRandomString() + "peerpods-param-cm.json")
	if configFile != "" {
		// confirm the rendered file actually landed on disk
		if fileInfo, statErr := os.Stat(configFile); statErr != nil {
			e2e.Logf("issue creating libvirt-podvm-image-cm file %s, err: %v , osStatMsg: %v", configFile, err, fileInfo)
		}
	}
	return configFile, err
}
// createSSHPeerPodsKeys generates a throwaway ssh keypair, and creates the
// "ssh-key-secret" secret in the operator namespace from it. For libvirt the
// private key is also included and the public key is pushed to the KVM host
// via ssh-copy-id (password taken from the CI secret). The local key files are
// shredded when the function returns. Returns nil if the secret already exists.
// NOTE(review): requires ssh-keygen and (for libvirt) sshpass on the test host.
func createSSHPeerPodsKeys(oc *exutil.CLI, ppParam PeerpodParam, provider string) error {
	g.By("Create ssh keys")
	// random suffix so parallel runs do not clobber each other's key files
	keyName := "id_rsa_" + getRandomString()
	pubKeyName := keyName + ".pub"
	fromFile := []string{"--from-file=id_rsa.pub=./" + pubKeyName}
	// shred both key files on return, whatever the outcome
	shredRMCmd := fmt.Sprintf(`shred -f --remove ./%v ./%v`, keyName, pubKeyName)
	defer exec.Command("bash", "-c", shredRMCmd).CombinedOutput()
	// generate the keypair with an empty passphrase
	sshKeyGenCmd := fmt.Sprintf(`ssh-keygen -f ./%v -N ""`, keyName)
	retCmd, err := exec.Command("bash", "-c", sshKeyGenCmd).CombinedOutput()
	if err != nil {
		e2e.Logf("the error: %v", string(retCmd))
		return err
	}
	if provider == "libvirt" {
		var (
			ciCmName     = "peerpods-param-cm"
			ciSecretName = "peerpods-param-secret"
		)
		// libvirt also needs the private key in the secret
		fromFile = append(fromFile, "--from-file=id_rsa=./"+keyName)
		configmapData, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", ciCmName, "-n", "default", "-o=jsonpath={.data}").Output()
		if err != nil {
			e2e.Failf("%v Configmap created by QE CI has error, no .data: %v %v", ciCmName, configmapData, err)
		}
		// overwrites the ppParam argument with the values from the CI configmap
		ppParam, err = parseCIPpConfigMapData(provider, configmapData)
		if err != nil {
			e2e.Failf("Error getting ppParam %v", err)
		}
		secretData, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", ciSecretName, "-n", "default", "-o=jsonpath={.data}").Output()
		if err != nil {
			e2e.Failf("%v Secret created by QE CI has error %v", ciSecretName, err)
		}
		// KVM host password comes base64-encoded from the CI secret
		hostpassword, err := decodeSecret(gjson.Get(secretData, "HOST_PASSWORD").String())
		if err != nil {
			e2e.Logf("Error: %v CI provided peer pods secret data can't be decoded", err)
			return err
		}
		// install the public key on the KVM host so the operator can ssh in
		sshCopyIdCmd := fmt.Sprintf(`sshpass -p %v ssh-copy-id -i ./%v %v`, hostpassword, pubKeyName, ppParam.LIBVIRT_KVM_HOST_ADDRESS)
		retCmd, err = exec.Command("bash", "-c", sshCopyIdCmd).CombinedOutput()
		if err != nil {
			e2e.Logf("the error: %v", string(retCmd))
			return err
		}
	}
	// create the secret; an already-existing secret is treated as success
	sshSecretCmd := append([]string{"-n", "openshift-sandboxed-containers-operator", "secret", "generic", "ssh-key-secret"}, fromFile...)
	secretMsg, err := oc.AsAdmin().WithoutNamespace().Run("create").Args(sshSecretCmd...).Output()
	if strings.Contains(secretMsg, "already exists") {
		e2e.Logf(`ssh-key-secret created and it already exists`)
		return nil
	}
	return err
}
// checkLabeledPodsExpectedRunning verifies that exactly expectedRunning pods
// carry the given label in resNs and that every one of them reports the
// Ready condition as True. It fails the test otherwise.
// The inputs are strings to be consistent with other check....() functions;
// this is also what the oc command returns.
func checkLabeledPodsExpectedRunning(oc *exutil.CLI, resNs, label, expectedRunning string) (msg string, err error) {
	var (
		resType  = "pod"
		jsonpath = "-o=jsonpath={.status.conditions[?(@.type=='Ready')].status}"
		failMsg  []string
	)
	podList, err := exutil.GetAllPodsWithLabel(oc, resNs, label)
	if err != nil || len(podList) == 0 {
		e2e.Failf("Could not get pod names with %v label: %v %v", label, podList, err)
	}
	number, err := strconv.Atoi(expectedRunning)
	if number != len(podList) || err != nil {
		e2e.Failf("ERROR: Number of pods %v does not match %v expected pods: %v %v", number, expectedRunning, msg, err)
	}
	// collect every not-ready pod so the failure lists all of them at once
	for _, podName := range podList {
		msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(resType, podName, "-n", resNs, jsonpath).Output()
		if err != nil || strings.ToLower(msg) != "true" {
			failMsg = append(failMsg, fmt.Sprintf("ERROR: %v is not ready: %v %v", podName, msg, err))
		}
	}
	if len(failMsg) != 0 {
		e2e.Failf("%v pods are not ready: %v", len(failMsg), failMsg)
	}
	// fix: dropped the stray ')' the original appended to this message
	return fmt.Sprintf("All %v pods ready %v", expectedRunning, podList), nil
}
// checkResourceJsonpathMatch fetches two jsonpaths from the same resource and
// fails the test unless both are non-empty and equal. On success it returns
// the matched value and a human-readable "a == b" message.
// The inputs are strings to be consistent with other check....() functions.
func checkResourceJsonpathMatch(oc *exutil.CLI, resType, resName, resNs, jsonPath1, jsonPath2 string) (expectedMatch, msg string, err error) {
	const (
		duration time.Duration = 300
		interval time.Duration = 10
	)
	_, _ = checkResourceExists(oc, resType, resName, resNs, duration, interval)
	// fetch fails the test when the jsonpath cannot be read or is empty
	fetch := func(jp string) string {
		out, getErr := oc.AsAdmin().WithoutNamespace().Run("get").Args(resType, resName, "-n", resNs, jp).Output()
		if getErr != nil || out == "" {
			e2e.Failf("ERROR: could not get %v from %v %v: %v %v", jp, resType, resName, out, getErr)
		}
		return out
	}
	expectedMatch = fetch(jsonPath1)
	msg = fetch(jsonPath2)
	if expectedMatch != msg {
		e2e.Failf("ERROR: %v (%v) does not match %v (%v)", jsonPath1, expectedMatch, jsonPath2, msg)
	}
	msg = fmt.Sprintf("%v (%v) == %v (%v)", jsonPath1, expectedMatch, jsonPath2, msg)
	return expectedMatch, msg, nil
}
// clusterHasEnabledFIPS reports whether the cluster runs with FIPS mode enabled
// by probing the first master node with `fips-mode-setup --check`.
// It fails the test if the node or the check output cannot be obtained.
func clusterHasEnabledFIPS(oc *exutil.CLI, subscriptionNamespace string) bool {
	firstNode, err := exutil.GetFirstMasterNode(oc)
	errMsg := fmt.Sprintf("ERROR Could not get first node to check FIPS '%v' %v", firstNode, err)
	o.Expect(err).NotTo(o.HaveOccurred(), errMsg)
	o.Expect(firstNode).NotTo(o.BeEmpty(), errMsg)
	fipsModeStatus, err := oc.AsAdmin().Run("debug").Args("-n", subscriptionNamespace, "node/"+firstNode, "--", "chroot", "/host", "fips-mode-setup", "--check").Output()
	errMsg = fmt.Sprintf("ERROR Could not check FIPS on node %v: '%v' %v", firstNode, fipsModeStatus, err)
	o.Expect(err).NotTo(o.HaveOccurred(), errMsg)
	o.Expect(fipsModeStatus).NotTo(o.BeEmpty(), errMsg)
	// true when the tool reports "FIPS mode is enabled."
	return strings.Contains(fipsModeStatus, "FIPS mode is enabled.")
}
// patchPeerPodLimit patches the peerpodconfig limit to newLimit and waits until
// the value is reflected both on the CR and on the first worker node's
// allocatable kata.peerpods.io/vm resource. Fails the test on any mismatch.
func patchPeerPodLimit(oc *exutil.CLI, opNamespace, newLimit string) {
	patch := fmt.Sprintf("{\"spec\":{\"limit\":\"%v\"}}", newLimit)
	msg, err := oc.AsAdmin().Run("patch").Args("peerpodconfig", "peerpodconfig-openshift", "-n",
		opNamespace, "--type", "merge", "--patch", patch).Output()
	o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Could not patch podvm limit to %v\n error: %v %v", newLimit, msg, err))
	o.Expect(getPeerPodLimit(oc, opNamespace)).To(o.Equal(newLimit))
	// poll the node until the new value has propagated to its allocatable resources
	jsonpath := "-o=jsonpath='{.status.allocatable.kata\\.peerpods\\.io/vm}'"
	nodeName, _ := exutil.GetFirstWorkerNode(oc)
	nodeLimit, _ := checkResourceJsonpath(oc, "node", nodeName, opNamespace, jsonpath, newLimit, 30*time.Second, 5*time.Second)
	e2e.Logf("node podvm limit is %v", nodeLimit)
	// the jsonpath output is wrapped in single quotes; strip them before comparing
	o.Expect(strings.Trim(nodeLimit, "'")).To(o.Equal(newLimit))
}
// getPeerPodLimit returns .spec.limit from the peerpodconfig-openshift CR in
// opNamespace, failing the test if it cannot be read.
func getPeerPodLimit(oc *exutil.CLI, opNamespace string) (podLimit string) {
	const jsonpathLimit = "-o=jsonpath={.spec.limit}"
	podLimit, err := oc.AsAdmin().Run("get").Args("peerpodconfig", "peerpodconfig-openshift", "-n", opNamespace, jsonpathLimit).Output()
	o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Could not find %v in %v\n Error: %v", jsonpathLimit, "peerpodconfig-openshift", err))
	e2e.Logf("peerpodconfig podvm limit is %v", podLimit)
	return podLimit
}
// getPeerPodMetadataInstanceType queries the cloud metadata service from inside
// the peer pod (via `oc exec ... curl`) and returns the instance type (AWS) or
// vmSize (Azure) the pod VM is running on.
func getPeerPodMetadataInstanceType(oc *exutil.CLI, opNamespace, podName, provider string) (string, error) {
	curlArgs := map[string][]string{
		"aws":   {"http://169.254.169.254/latest/meta-data/instance-type"},
		"azure": {"-H", "Metadata:true", "\\*", "http://169.254.169.254/metadata/instance/compute/vmSize?api-version=2023-07-01&format=text"},
	}
	execArgs := append([]string{"-n", opNamespace, podName, "--", "curl", "-s"}, curlArgs[provider]...)
	return oc.WithoutNamespace().AsAdmin().Run("exec").Args(execArgs...).Output()
}
// getPeerPodMetadataTags queries the cloud metadata service from inside the
// peer pod and returns the instance tags.
//
// AWS has to enable tags first by finding the instance-id from metadata:
//   curl -s http://169.254.169.254/latest/meta-data/instance-id
//   aws ec2 modify-instance-metadata-options --instance-id i-0a893c6458c272d12 --instance-metadata-tags enabled
//   curl -s http://169.254.169.254/latest/meta-data/tags/instance/key1  ->  value1
//
// Azure's pod tag format differs from the configmap's "key1=value1,key2=value2":
//   sh-4.4$ curl -s -H "Metadata:true" "\\*" "http://169.254.169.254/metadata/instance/compute/tags?api-version=2023-07-01&format=text"
//   key1:value1;key2:value2sh-4.4$
func getPeerPodMetadataTags(oc *exutil.CLI, opNamespace, podName, provider string) (string, error) {
	curlArgs := map[string][]string{
		"aws":   {"http://169.254.169.254/latest/meta-data/tags/instance/key1"},
		"azure": {"-H", "Metadata:true", "\\*", "http://169.254.169.254/metadata/instance/compute/tags?api-version=2023-07-01&format=text"},
	}
	execArgs := append([]string{"-n", opNamespace, podName, "--", "curl", "-s"}, curlArgs[provider]...)
	return oc.WithoutNamespace().AsAdmin().Run("exec").Args(execArgs...).Output()
}
// CheckPodVMImageID looks up the provider-specific image-ID key
// (PODVM_AMI_ID on AWS, AZURE_IMAGE_ID on Azure) in the peer pods configmap.
// It returns a human-readable msg, an error only when the configmap itself
// cannot be read, and the image ID ("" when the key is absent or empty).
func CheckPodVMImageID(oc *exutil.CLI, ppConfigMapName, provider, opNamespace string) (msg string, err error, imageID string) {
	imageIDKeys := map[string]string{
		"aws":   "PODVM_AMI_ID",
		"azure": "AZURE_IMAGE_ID",
	}
	// Fetch the configmap data
	data, err := oc.AsAdmin().Run("get").Args("configmap", ppConfigMapName, "-n", opNamespace, "-o=jsonpath={.data}").Output()
	if err != nil {
		return "Error fetching configmap details", err, ""
	}
	key := imageIDKeys[provider]
	idResult := gjson.Get(data, key)
	if !idResult.Exists() {
		// key absent entirely
		e2e.Logf("Image ID parameter '%s' not found in the config map", key)
		return fmt.Sprintf("CM created does not have: %s", key), nil, ""
	}
	if idResult.String() == "" {
		// key present but holds an empty string
		e2e.Logf("Image ID parameter found in the config map but is an empty string; Image ID :%s", key)
		return fmt.Sprintf("CM created has an empty value for Image ID : %s", key), nil, ""
	}
	return "CM does have the Image ID", nil, idResult.String()
}
// getTestRunConfigmap loads the test-run configmap (testrunConfigmapName in
// testrunConfigmapNs) into testrun. A missing configmap is not an error
// (returns false, nil); a present configmap with missing mandatory keys
// returns an error listing every missing key. testrun.checked is set to true
// once the configmap has been conclusively handled (absent, or fully parsed).
func getTestRunConfigmap(oc *exutil.CLI, testrun *TestRunDescription, testrunConfigmapNs, testrunConfigmapName string) (configmapExists bool, err error) {
	configmapExists = true
	if testrun.checked { // already parsed on a previous call
		return configmapExists, nil
	}
	errorMessage := ""
	configmapJson, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", testrunConfigmapNs, testrunConfigmapName, "-o", "json").Output()
	if err != nil {
		e2e.Logf("Configmap is not found: %v %v", configmapJson, err)
		testrun.checked = true // we checked, it doesn't exist
		return false, nil
	}
	configmapData, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", testrunConfigmapNs, testrunConfigmapName, "-o", "jsonpath={.data}").Output()
	if err != nil {
		e2e.Logf("Configmap %v has error %v, no .data: %v %v", testrunConfigmapName, configmapJson, configmapData, err)
		return configmapExists, err
	}
	e2e.Logf("configmap file %v found. Data is:\n%v", testrunConfigmapName, configmapData)

	// requireString/requireBool copy a mandatory key into dest, prepending a
	// missing-key message (same format/order as before) when the key is absent.
	// Centralizing the lookup also removes the copy-paste key-name mismatches
	// the old per-key code suffered from.
	requireString := func(key string, dest *string) {
		if gjson.Get(configmapData, key).Exists() {
			*dest = gjson.Get(configmapData, key).String()
		} else {
			errorMessage = fmt.Sprintf("%v is missing from data\n%v", key, errorMessage)
		}
	}
	requireBool := func(key string, dest *bool) {
		if gjson.Get(configmapData, key).Exists() {
			*dest = gjson.Get(configmapData, key).Bool()
		} else {
			errorMessage = fmt.Sprintf("%v is missing from data\n%v", key, errorMessage)
		}
	}

	requireString("catalogsourcename", &testrun.catalogSourceName)
	requireString("channel", &testrun.channel)
	requireBool("redirectNeeded", &testrun.redirectNeeded)
	requireString("mustgatherimage", &testrun.mustgatherImage)
	if strings.Contains(testrun.mustgatherImage, "brew.registry.redhat.io") {
		// brew images are only reachable through the registry redirect
		testrun.redirectNeeded = true
	}
	requireBool("eligibility", &testrun.eligibility)
	requireBool("eligibleSingleNode", &testrun.eligibleSingleNode)
	// BUG FIX: the original checked "labelSingleNode" but then read
	// "labelsinglenode"; gjson paths are case-sensitive, so the value
	// was silently always false.
	requireBool("labelSingleNode", &testrun.labelSingleNode)
	requireString("operatorVer", &testrun.operatorVer)
	requireString("runtimeClassName", &testrun.runtimeClassName)
	requireBool("enablePeerPods", &testrun.enablePeerPods)
	requireBool("enableGPU", &testrun.enableGPU)
	requireString("podvmImageUrl", &testrun.podvmImageUrl)
	requireString("workloadImage", &testrun.workloadImage)
	requireBool("installKataRPM", &testrun.installKataRPM)
	requireString("workloadToTest", &testrun.workloadToTest)
	if gjson.Get(configmapData, "workloadToTest").Exists() {
		// the workload must be one of the allowed types
		workloadAllowed := false
		for _, v := range allowedWorkloadTypes {
			if v == testrun.workloadToTest {
				workloadAllowed = true
			}
		}
		if !workloadAllowed {
			errorMessage = fmt.Sprintf("workloadToTest (%v) is not one of the allowed workloads (%v)\n%v", testrun.workloadToTest, allowedWorkloadTypes, errorMessage)
		}
	}
	// only if testing coco workloads
	// not required yet, so set defaults
	if testrun.workloadToTest == "coco" {
		trusteeErrorMessage := ""
		if gjson.Get(configmapData, "trusteeCatalogSourcename").Exists() {
			testrun.trusteeCatalogSourcename = gjson.Get(configmapData, "trusteeCatalogSourcename").String()
		} else {
			testrun.trusteeCatalogSourcename = "redhat-operators"
			trusteeErrorMessage = fmt.Sprintf("workload is coco and trusteeCatalogSourcename is missing from data\n%v", trusteeErrorMessage)
		}
		if gjson.Get(configmapData, "trusteeUrl").Exists() {
			// if blank, in-cluster trustee will be used
			testrun.trusteeUrl = gjson.Get(configmapData, "trusteeUrl").String()
		}
		if trusteeErrorMessage != "" {
			e2e.Logf("Some of the trustee data was not in osc-config. Using defaults in those cases:\n%v", trusteeErrorMessage)
		}
	}
	if errorMessage != "" {
		err = fmt.Errorf("%v", errorMessage)
		// testrun.checked stays false: setup is wrong & all tests will fail
	} else {
		testrun.checked = true // no errors, we checked
	}
	return configmapExists, err
}
// getTestRunParameters loads the test-run configmap (once) and, when present,
// propagates its values into the subscription and kataconfig descriptions so
// all three stay consistent. Returns whether the configmap exists and any
// parse error from getTestRunConfigmap.
func getTestRunParameters(oc *exutil.CLI, subscription *SubscriptionDescription, kataconfig *KataconfigDescription, testrun *TestRunDescription, testrunConfigmapNs, testrunConfigmapName string) (configmapExists bool, err error) {
	if testrun.checked { // already parsed; final values == input values
		return true, nil
	}
	configmapExists, err = getTestRunConfigmap(oc, testrun, testrunConfigmapNs, testrunConfigmapName)
	if err != nil {
		// testrun.checked stays false
		return configmapExists, err
	}
	// no errors: testrun.checked is now true
	if configmapExists {
		// testrun changed, so keep subscription & kataconfig in sync with it
		subscription.catalogSourceName = testrun.catalogSourceName
		subscription.channel = testrun.channel
		kataconfig.eligibility = testrun.eligibility
		kataconfig.runtimeClassName = testrun.runtimeClassName
		kataconfig.enablePeerPods = testrun.enablePeerPods
	}
	return configmapExists, nil
}
// getUpgradeCatalogConfigMap determines whether the upgrade-catalog configmap
// exists and, when it does, records the image to upgrade to (imageAfter, from
// the configmap) and the catalog's current image (imageBefore, from the
// catsrc). A missing configmap is not an error.
func getUpgradeCatalogConfigMap(oc *exutil.CLI, upgradeCatalog *UpgradeCatalogDescription) (err error) {
	upgradeCatalog.exists = false
	// need a checkResourceExists that doesn't fail when not found.
	configMaps, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", upgradeCatalog.namespace, "-o=jsonpath={.items..metadata.name}").Output()
	if err != nil {
		err = fmt.Errorf("cannot get configmaps in ns %v: Configmaps=[%v] Error:%w", upgradeCatalog.namespace, configMaps, err)
		upgradeCatalog.exists = true // override skip if there is an error
		return err
	}
	// BUG FIX: strings.Contains matched substrings (e.g. "my-cm" inside
	// "my-cm-2"); compare whole whitespace-separated names instead.
	for _, cmName := range strings.Fields(configMaps) {
		if cmName == upgradeCatalog.name {
			upgradeCatalog.exists = true
			break
		}
	}
	if !upgradeCatalog.exists { // no cm is not error
		return nil
	}
	upgradeCatalog.imageAfter, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", upgradeCatalog.namespace, upgradeCatalog.name, "-o=jsonpath={.data.imageAfter}").Output()
	if err != nil || upgradeCatalog.imageAfter == "" {
		err = fmt.Errorf("The %v configmap is missing the imageAfter: %v %v", upgradeCatalog.name, upgradeCatalog.imageAfter, err)
		return err
	}
	upgradeCatalog.imageBefore, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("catsrc", "-n", "openshift-marketplace", upgradeCatalog.catalogName, "-o=jsonpath={.spec.image}").Output()
	if err != nil {
		err = fmt.Errorf("Could not get the current image from the %v catsrc %v %v", upgradeCatalog.catalogName, upgradeCatalog.imageBefore, err)
		return err
	}
	return nil
}
// changeCatalogImage patches the catalog source to the given image, verifies
// the patch took effect, and waits for the catalog to report READY.
func changeCatalogImage(oc *exutil.CLI, catalogName, catalogImage string) (err error) {
	patch := fmt.Sprintf("{\"spec\":{\"image\":\"%v\"}}", catalogImage)
	patchOut, err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("catsrc", catalogName, "--type", "merge", "-p", patch, "-n", "openshift-marketplace").Output()
	if err != nil {
		return fmt.Errorf("Could not patch %v %v %v", catalogName, patchOut, err)
	}
	// read the image back to confirm the patch actually changed it
	imageNow, err := oc.AsAdmin().Run("get").Args("catsrc", catalogName, "-n", "openshift-marketplace", "-o=jsonpath={.spec.image}").Output()
	if err != nil || imageNow != catalogImage {
		return fmt.Errorf("Catalog patch did not change image to %v %v %v", catalogImage, imageNow, err)
	}
	waitForCatalogReadyOrFail(oc, catalogName)
	return nil
}
// waitForCatalogReadyOrFail blocks until the named catalog source in
// openshift-marketplace reports lastObservedState READY (polling every 10s for
// up to 300s); checkResourceJsonpath fails the test on timeout.
func waitForCatalogReadyOrFail(oc *exutil.CLI, catalogName string) {
	_, _ = checkResourceJsonpath(oc, "catsrc", catalogName, "openshift-marketplace", "-o=jsonpath={.status.connectionState.lastObservedState}", "READY", 300*time.Second, 10*time.Second)
}
// checkResourceJsonPathChanged polls the given resource's jsonpath, which is
// known to currently yield currentValue, until the value changes, and returns
// the new value. On timeout it fails the test via AssertWaitPollNoErr.
// The last poll error is intentionally not propagated to the caller; a
// timeout is already reported as a test failure.
func checkResourceJsonPathChanged(oc *exutil.CLI, resType, resName, resNs, jsonpath, currentValue string, duration, interval time.Duration) (newValue string, err error) {
	// watch a resource that has a known value until it changes. Return the new value
	errCheck := wait.PollImmediate(interval, duration, func() (bool, error) {
		newValue, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(resType, resName, "-n", resNs, jsonpath).Output()
		// Only a successful read that differs from the starting value counts as a change.
		if newValue != currentValue && err == nil {
			return true, nil
		}
		return false, nil
	})
	// Fixed message: we are waiting for the value to CHANGE away from currentValue,
	// not to reach it, and duration is a time.Duration (prints its own unit).
	exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("%v %v in ns %v did not change from %v after %v: %v %v", resType, resName, resNs, currentValue, duration, newValue, err))
	return newValue, nil
}
// waitForPodsToTerminate polls the namespace until none of the pod names in
// listOfPods (a whitespace-separated list) still appear among the namespace's
// pods, failing the test if any remain when the poll times out.
func waitForPodsToTerminate(oc *exutil.CLI, namespace, listOfPods string) {
	var currentPods string
	errCheck := wait.PollImmediate(10*time.Second, snooze*time.Second, func() (bool, error) {
		currentPods, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, "-o=jsonpath={.items..metadata.name}").Output()
		// Keep polling while any watched pod name is still listed.
		for _, pod := range strings.Fields(listOfPods) {
			if strings.Contains(currentPods, pod) {
				return false, nil
			}
		}
		return true, nil
	})
	// Refresh the pod list once more so the failure message shows the final state.
	currentPods, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, "-o=jsonpath={.items..metadata.name}").Output()
	exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Timeout waiting for a (%v) pods to terminate. Current pods %v running", listOfPods, currentPods))
}
// patchPodvmEnableGPU merge-patches the ENABLE_NVIDIA_GPU key of the podvm
// configmap to the given value and asserts the stored value matches.
func patchPodvmEnableGPU(oc *exutil.CLI, opNamespace, cmName, enableGpu string) {
	gpuPatch := fmt.Sprintf(`{"data":{"ENABLE_NVIDIA_GPU":"%s"}}`, enableGpu)
	output, err := oc.AsAdmin().Run("patch").Args("configmap", cmName, "-n",
		opNamespace, "--type", "merge", "--patch", gpuPatch).Output()
	o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Could not patch ENABLE_NVIDIA_GPU to %v\n error: %v %v", enableGpu, output, err))
	// Read the value back to confirm the patch took effect.
	o.Expect(getPodvmEnableGPU(oc, opNamespace, cmName)).To(o.Equal(enableGpu))
}
// getPodvmEnableGPU returns the ENABLE_NVIDIA_GPU value from the podvm
// configmap in opNamespace, failing the test if the key cannot be read.
func getPodvmEnableGPU(oc *exutil.CLI, opNamespace, cmName string) (enGPU string) {
	query := "-o=jsonpath={.data.ENABLE_NVIDIA_GPU}"
	value, err := oc.AsAdmin().Run("get").Args("configmap", cmName, "-n", opNamespace, query).Output()
	o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Could not find %v in %v\n Error: %v", query, cmName, err))
	e2e.Logf("ENABLE_NVIDIA_GPU is %v", value)
	return value
}
// installKataContainerRPM installs the scratch kata-containers RPM staged
// under /var/local on every worker node, remounting /usr read-write first.
// It returns the RPM file name and an aggregate error if any node failed.
func installKataContainerRPM(oc *exutil.CLI, testrun *TestRunDescription) (rpmName string, err error) {
workerNodeList, err := exutil.GetClusterNodesBy(oc, "worker")
if err != nil || len(workerNodeList) < 1 {
err = fmt.Errorf("Error: no worker nodes: %v, %v", workerNodeList, err)
return rpmName, err
}
// Confirm the RPM is present on the nodes (and learn its file name) before installing.
rpmName, err = checkNodesForKataContainerRPM(oc, testrun, workerNodeList)
if err != nil {
return rpmName, err
}
errors := ""
cmd := fmt.Sprintf("mount -o remount,rw /usr; rpm -Uvh /var/local/%v", rpmName)
for index := range workerNodeList {
// NOTE(review): err here deliberately shadows the outer err; per-node failures
// are only surfaced through the accumulated errors string below.
msg, err := exutil.DebugNodeWithOptionsAndChroot(oc, workerNodeList[index], []string{"-q"}, "/bin/sh", "-c", cmd)
// Output containing "already installed" or "installing" means rpm succeeded
// (or had nothing to do), so only other outputs with an error are recorded.
if !(strings.Contains(msg, "already installed") || strings.Contains(msg, "installing")) {
if err != nil {
errors = fmt.Sprintf("%vError trying to rpm -Uvh %v on %v: %v %v\n", errors, rpmName, workerNodeList[index], msg, err)
}
}
}
if errors != "" {
err = fmt.Errorf("Error: Scratch rpm errors: %v", errors)
}
return rpmName, err
}
// checkNodesForKataContainerRPM looks on each worker node for a scratch
// kata-containers*.rpm file under /var/local and returns its file name.
// testrun is currently unused but kept for signature compatibility with
// callers. An error is returned listing every node on which no RPM had been
// found by the time that node was checked.
func checkNodesForKataContainerRPM(oc *exutil.CLI, testrun *TestRunDescription, workerNodeList []string) (rpmName string, err error) {
	// check if rpm exists
	errors := ""
	msg := ""
	// Constant command string: a plain literal, not fmt.Sprintf (staticcheck S1039).
	cmd := "ls -1 /var/local | grep '^kata-containers.*rpm$'"
	for index := range workerNodeList {
		msg, err = exutil.DebugNodeWithOptionsAndChroot(oc, workerNodeList[index], []string{"-q"}, "/bin/sh", "-c", cmd)
		if strings.Contains(msg, "kata-containers") && strings.Contains(msg, ".rpm") {
			rpmName = strings.TrimRight(msg, "\n") // need test
		}
		// NOTE(review): once rpmName is set by an earlier node, later nodes missing
		// the RPM are not reported — confirm that is the intended semantics.
		if rpmName == "" {
			errors = fmt.Sprintf("%vError finding /var/local/kata-containers.*rpm on %v: %v %v\n", errors, workerNodeList[index], msg, err)
		}
	}
	if errors != "" {
		err = fmt.Errorf("Errors finding rpm in /var/local: %v", errors)
	}
	return rpmName, err
}
// verifyImageCreationJobSuccess waits for the osc-podvm-image-creation job pod
// to reach Succeeded, then validates its logs: optional libvirt image-pull
// checksum line, the upload/patch success markers, and that the uploaded
// volume name in the logs matches LIBVIRT_IMAGE_ID in peer-pods-cm.
// Returns the full job logs on success.
func verifyImageCreationJobSuccess(oc *exutil.CLI, namespace string, ppParam PeerpodParam, ciCmName string, provider string) (msg string, err error) {
var jobPodName string
// Wait up to 15 minutes for a Succeeded pod belonging to the image-creation job.
err = wait.PollImmediate(10*time.Second, 15*time.Minute, func() (bool, error) {
msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, "--field-selector=status.phase=Succeeded", "--selector=job-name=osc-podvm-image-creation", "-o=jsonpath={.items[0].metadata.name}").Output()
if err != nil || msg == "" {
e2e.Logf("Waiting for PodVM image creation job to complete")
return false, nil
}
jobPodName = msg
return true, nil
})
if err != nil {
return "", fmt.Errorf("Image creation job did not succeed within the expected time")
}
logs, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args(jobPodName, "-n", namespace).Output()
if err != nil {
return "", fmt.Errorf("Error retrieving logs: %v", err)
}
// Re-read the CI configmap; the ppParam argument is overwritten with the parsed values.
configmapData, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", ciCmName, "-n", namespace, "-o=jsonpath={.data}").Output()
if err != nil {
e2e.Failf("%v Configmap created by QE CI has error: %v", ciCmName, err)
}
ppParam, err = parseCIPpConfigMapData(provider, configmapData)
if err != nil {
e2e.Failf("Error getting ppParam %v", err)
}
// When an image URI was supplied, the job must have logged the pulled image checksum.
if ppParam.LIBVIRT_PODVM_IMAGE_URI != "" {
if !strings.Contains(logs, "Checksum of the PodVM image:") {
return "", fmt.Errorf("Pulling image from LIBVIRT_PODVM_IMAGE_URI failed")
}
e2e.Logf("PodVM image pull logs validated successfully")
}
// Both success markers must be present; otherwise dump the last ~30 log lines for triage.
if !strings.Contains(logs, "Uploaded the image successfully") || !strings.Contains(logs, "configmap/peer-pods-cm patched") {
logLines := strings.Split(logs, "\n")
start := len(logLines) - 30
if start < 0 {
start = 0
}
endLogs := logLines[start:]
trimmedLogs := strings.Join(endLogs, "\n")
e2e.Logf("Job logs do not contain success messages: %v", trimmedLogs)
return "", fmt.Errorf("Failed to get expected success message from the job logs")
}
// Cross-check: the volume the job uploaded must match the image id recorded in peer-pods-cm.
configMapOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm", "peer-pods-cm", "-n", namespace, "-o=jsonpath={.data.LIBVIRT_IMAGE_ID}").Output()
if err != nil {
return "", fmt.Errorf("Failed to retrieve LIBVIRT_IMAGE_ID from ConfigMap: %v", err)
}
if !strings.Contains(logs, fmt.Sprintf("vol-upload: found option <vol>: %s", configMapOutput)) {
return "", fmt.Errorf("LIBVIRT_IMAGE_ID in ConfigMap does not match the logs")
}
return logs, nil
}
// checkSEEnabled verifies the pod is running inside an IBM Secure Execution
// guest: /sys/firmware/uv/prot_virt_guest must read "1" and /proc/cpuinfo
// must list facility 158. All failed checks are combined into one error;
// nil is returned when both checks pass.
func checkSEEnabled(oc *exutil.CLI, podName, namespace string) error {
	var failures []string
	protVirt, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args(podName, "-n", namespace, "--", "cat", "/sys/firmware/uv/prot_virt_guest").Output()
	if err != nil || protVirt != "1" {
		failures = append(failures, fmt.Sprintf("prot_virt_guest is not 1, got %v", protVirt))
	}
	facilities, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args(podName, "-n", namespace, "--", "grep", "facilities", "/proc/cpuinfo").Output()
	if err != nil || !strings.Contains(facilities, "158") {
		failures = append(failures, fmt.Sprintf("'facilities' in /proc/cpuinfo does not contain 158, got %v", facilities))
	}
	if len(failures) != 0 {
		return fmt.Errorf("SE-enabled checks failed: %v", strings.Join(failures, "; "))
	}
	g.By("SE checks passed for pod " + podName)
	return nil
}
// deleteOperator removes the operator installed by sub: the installed CSV is
// deleted first, and only if that succeeds is the subscription deleted too.
// The CSV name must resolve to a non-empty value or the test fails.
func deleteOperator(oc *exutil.CLI, sub SubscriptionDescription) (msg string, err error) {
	// Look up which CSV the subscription installed.
	csvName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", sub.subName, "-n", sub.namespace, "-o=jsonpath={.status.installedCSV}").Output()
	o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("ERROR: cannot get sub %v installedCSV %v %v", sub.subName, csvName, err))
	o.Expect(csvName).NotTo(o.BeEmpty(), fmt.Sprintf("installedCSV value is empty: %v", csvName))
	// Delete the CSV; a failure here skips the subscription deletion.
	if msg, err = deleteResource(oc, "csv", csvName, sub.namespace, resSnoose*time.Second, 10*time.Second); err != nil {
		return msg, err
	}
	return deleteResource(oc, "sub", sub.subName, sub.namespace, resSnoose*time.Second, 10*time.Second)
}
// testControlPod waits until the resource's desired pod count (at
// desiredCountJsonPath) matches its actual count (at actualCountJsonPath),
// then verifies that the same number of Running pods carry podLabel.
func testControlPod(oc *exutil.CLI, namespace, resType, resName, desiredCountJsonPath, actualCountJsonPath, podLabel string) (msg string, err error) {
	expectedPods, msg, err := checkResourceJsonpathMatch(oc, resType, resName, namespace, desiredCountJsonPath, actualCountJsonPath)
	if err != nil {
		return msg, err
	}
	// An empty match result means the two counts never converged.
	if msg == "" {
		return "", fmt.Errorf("%v does not match %v in %v %v %v %v", desiredCountJsonPath, actualCountJsonPath, resName, resType, msg, err)
	}
	msg, err = checkLabeledPodsExpectedRunning(oc, namespace, podLabel, expectedPods)
	if msg == "" {
		return "", fmt.Errorf("Could not find pods labeled %v %v %v", podLabel, msg, err)
	}
	return msg, err
}
// configureTrustee provisions the Trustee KBS configuration: auth key secret,
// kbs-config and rvps-reference-values configmaps, the kbsres1/security-policy/
// cosign-public-key secrets, the resource policy, and the KbsConfig CR.
// When startingTrusteeURL is empty it derives an internal URL from the first
// worker node's address and the kbs-service nodePort; otherwise the returned
// trusteeURL is left empty and callers keep using startingTrusteeURL.
func configureTrustee(oc *exutil.CLI, trusteeSubscription SubscriptionDescription, testDataDir, startingTrusteeURL string) (trusteeURL string, err error) {
var (
trusteeKbsconfigTemplate = filepath.Join(testDataDir, "kbsconfig-template.yaml")
rvpsReferenceValuesCMTemplate = filepath.Join(testDataDir, "rvps-reference-values-template.yaml")
resourcePolicyCMTemplate = filepath.Join(testDataDir, "resource-policy-template.yaml")
securityPolicyCMTemplate = filepath.Join(testDataDir, "security-policy-template.json")
kbsconfigCMTemplate = filepath.Join(testDataDir, "kbs-config-cm-template.yaml")
trusteeCosignPublicKey = filepath.Join(testDataDir, "trustee-cosign-publickey.pem")
trusteeKbsPublicKey = filepath.Join(testDataDir, "kbs-auth-public-key")
)
// Secret creation errors (e.g. AlreadyExists on re-runs) are logged, not fatal.
msg, err := oc.AsAdmin().Run("create").Args("secret", "generic", "kbs-auth-public-key",
"--from-file=publicKey="+trusteeKbsPublicKey, "-n", trusteeSubscription.namespace).Output()
if err != nil {
e2e.Logf("TRUSTEE Created kbs-auth-public-key secret: %v %v", msg, err)
}
templateArgs := fmt.Sprintf("NAME=%v INSECUREHTTP=true", trusteeSubscription.namespace)
kbsConfigCMFile, _ := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f",
kbsconfigCMTemplate, "-p", templateArgs).OutputToFile(getRandomString() + "kbs-config-cm.json")
err = ensureConfigmapIsApplied(oc, trusteeSubscription.namespace, kbsConfigCMFile)
if err != nil {
return trusteeURL, err
}
e2e.Logf("TRUSTEE Created kbs-config-cm: %v", err)
templateArgs = fmt.Sprintf("NAMESPACE=%v", trusteeSubscription.namespace)
rvpsReferenceValuesCMFile, _ := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f",
rvpsReferenceValuesCMTemplate, "-p", templateArgs).OutputToFile(getRandomString() + "rvps-reference-values.json")
err = ensureConfigmapIsApplied(oc, trusteeSubscription.namespace, rvpsReferenceValuesCMFile)
if err != nil {
return trusteeURL, err
}
e2e.Logf("TRUSTEE Created rvps-reference-values: %v", err)
// kbsres1 holds test resources; key1=res1val1 is what ensureTrusteeUrlReturnIsValid queries.
msg, err = oc.AsAdmin().Run("create").Args("secret", "generic", "kbsres1", "--from-literal",
"key1=res1val1", "--from-literal", "key2=res1val2", "-n", trusteeSubscription.namespace).Output()
if err != nil {
e2e.Logf("TRUSTEE Created kbres1 secret: %v %v", msg, err)
}
// Allow-all resource policy for the test environment.
trusteePolicyRego := "package policy default allow = true"
resourcePolicyCMFile, _ := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f",
resourcePolicyCMTemplate, "-n", trusteeSubscription.namespace, "-p", "NAMESPACE="+trusteeSubscription.namespace,
"-p", "POLICYREGO="+trusteePolicyRego).OutputToFile(getRandomString() + "resource-policy.json")
err = ensureConfigmapIsApplied(oc, trusteeSubscription.namespace, resourcePolicyCMFile)
if err != nil {
return trusteeURL, err
}
e2e.Logf("TRUSTEE Created resource-policy cm: %v", err)
// Attestation Policy goes here
// secret security-policy DONE
msg, err = oc.AsAdmin().Run("create").Args("secret", "generic", "security-policy",
"--from-file=osc="+securityPolicyCMTemplate, "-n", trusteeSubscription.namespace).Output()
if err != nil {
e2e.Logf("TRUSTEE Created security-policy secret: %v %v", msg, err)
}
// secret cosign-public-key DONE
msg, err = oc.AsAdmin().Run("create").Args("secret", "generic", "cosign-public-key",
"--from-file=test="+trusteeCosignPublicKey, "-n", trusteeSubscription.namespace).Output()
if err != nil {
e2e.Logf("TRUSTEE Created cosign-public-key secret: %v %v", msg, err)
}
// need to ensureSecret?
// kbsconfig
kbsSecretResources := `["kbsres1","security-policy", "cosign-public-key"]`
kbsconfigFile, _ := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f",
trusteeKbsconfigTemplate, "-n", trusteeSubscription.namespace,
"-p", "KBSSECRETRESOURCES="+kbsSecretResources).OutputToFile(getRandomString() + "kbsconfig.json")
msg, err = oc.AsAdmin().Run("apply").Args("-f", kbsconfigFile, "-n", trusteeSubscription.namespace).Output()
e2e.Logf("TRUSTEE Applied kbsconfig %v: %v %v", kbsconfigFile, msg, err)
if startingTrusteeURL == "" { // use internal trustee
// Build the internal URL from the first worker node's first address
// and the kbs-service nodePort.
node, err := exutil.GetFirstWorkerNode(oc)
if err != nil || node == "" {
return trusteeURL, fmt.Errorf("could not get 1st worker node: %v err: %v", node, err)
}
msg, err = oc.AsAdmin().Run("get").Args("node", node, "-o=jsonpath={.status.addresses..address}").Output()
if err != nil || msg == "" {
return trusteeURL, fmt.Errorf("Could not get ip of %v: %v %v", node, msg, err)
}
nodeIP := strings.Fields(msg)[0]
nodePort, err := oc.AsAdmin().Run("get").Args("-n", trusteeSubscription.namespace,
"service", "kbs-service", "-o=jsonpath={.spec.ports..nodePort}").Output()
if err != nil {
return trusteeURL, fmt.Errorf("Could not retrieve nodePort from kbs-service: %v %v", nodePort, err)
}
trusteeURL = fmt.Sprintf("http://%v:%v", nodeIP, nodePort)
}
return trusteeURL, err
}
| package kata | ||||
function | openshift/openshift-tests-private | 96baceba-851b-45a5-86a9-4369e0981cad | ensureNamespaceIsInstalled | ['"encoding/json"', '"fmt"', '"os"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func ensureNamespaceIsInstalled(oc *exutil.CLI, namespace, namespaceTemplateFile string) (err error) {
// Probe for the namespace; an error or NotFound output means it must be created.
msg, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ns", namespace, "--no-headers").Output()
if err != nil || strings.Contains(msg, "Error from server (NotFound)") {
// Render the namespace template to a temp file, then apply it.
namespaceFile, err := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", namespaceTemplateFile,
"-p", "NAME="+namespace).OutputToFile(getRandomString() + "namespaceFile.json")
if err != nil || namespaceFile == "" {
// NOTE(review): namespaceFile is a file path; the "already exists" substring
// check can realistically never match it — confirm the original intent.
if !strings.Contains(namespaceFile, "already exists") {
_, statErr := os.Stat(namespaceFile)
if statErr != nil {
err = fmt.Errorf("ERROR creating the namespace (%v) yaml %s, %v", namespace, namespaceFile, statErr)
return err
}
}
}
msg, err = oc.AsAdmin().Run("apply").Args("-f", namespaceFile).Output()
// "AlreadyExists"/"unchanged"/"created" all count as success.
if strings.Contains(msg, "AlreadyExists") || strings.Contains(msg, "unchanged") || strings.Contains(msg, "created") {
return nil
}
if err != nil {
return fmt.Errorf(" applying namespace file (%v) issue: %v %v", namespaceFile, msg, err)
}
}
return err
} | kata | ||||
function | openshift/openshift-tests-private | b50b9263-37ee-4e2c-b9da-c7f95899cbb2 | ensureOperatorGroupIsInstalled | ['"encoding/json"', '"fmt"', '"os"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func ensureOperatorGroupIsInstalled(oc *exutil.CLI, namespace, templateFile string) (err error) {
// Probe for an existing operatorgroup; create one from the template if none exists.
msg, err := oc.AsAdmin().Run("get").Args("operatorgroup", "-n", namespace, "--no-headers").Output()
if err != nil || strings.Contains(msg, "No resources found in") {
operatorgroupFile, err := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", templateFile,
"-p", "NAME="+namespace, "NAMESPACE="+namespace).OutputToFile(getRandomString() + "operatorgroupFile.json")
// NOTE(review): the sibling ensureNamespaceIsInstalled guards with `== ""` here,
// while this uses `!= ""` — one of the two conditions looks inverted; confirm.
if err != nil || operatorgroupFile != "" {
if !strings.Contains(operatorgroupFile, "already exists") {
_, statErr := os.Stat(operatorgroupFile)
if statErr != nil {
err = fmt.Errorf("ERROR creating the operatorgroup (%v) yaml %v, %v", namespace, operatorgroupFile, statErr)
return err
}
}
}
msg, err = oc.AsAdmin().Run("apply").Args("-f", operatorgroupFile, "-n", namespace).Output()
// "AlreadyExists"/"unchanged"/"created" all count as success.
if strings.Contains(msg, "AlreadyExists") || strings.Contains(msg, "unchanged") || strings.Contains(msg, "created") {
return nil
}
if err != nil {
return fmt.Errorf("applying operatorgroup file (%v) issue %v %v", operatorgroupFile, msg, err)
}
}
return err
} | kata | ||||
function | openshift/openshift-tests-private | 57483a0c-3a6c-4898-9273-26248a8704bd | ensureOperatorIsSubscribed | ['"encoding/json"', '"fmt"', '"os"', '"strings"'] | ['SubscriptionDescription'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func ensureOperatorIsSubscribed(oc *exutil.CLI, sub SubscriptionDescription, subTemplate string) (err error) {
// Probe for an existing subscription; create one from sub's fields if absent.
// NOTE(review): the subTemplate parameter is unused — the template path comes
// from sub.template; confirm whether the parameter should be dropped or used.
msg, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", sub.subName, "-n", sub.namespace, "--no-headers").Output()
if err != nil || strings.Contains(msg, "Error from server (NotFound):") {
subFile, err := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", sub.template, "-p", "SUBNAME="+sub.subName, "SUBNAMESPACE="+sub.namespace, "CHANNEL="+sub.channel,
"APPROVAL="+sub.ipApproval, "OPERATORNAME="+sub.operatorPackage, "SOURCENAME="+sub.catalogSourceName, "SOURCENAMESPACE="+sub.catalogSourceNamespace, "-n", sub.namespace).OutputToFile(getRandomString() + "subscriptionFile.json")
// NOTE(review): `subFile != ""` mirrors ensureOperatorGroupIsInstalled but differs
// from ensureNamespaceIsInstalled's `== ""` guard — confirm which is intended.
if err != nil || subFile != "" {
if !strings.Contains(subFile, "already exists") {
_, subFileExists := os.Stat(subFile)
if subFileExists != nil {
err = fmt.Errorf("ERROR creating the subscription yaml %s, %v", subFile, err)
return err
}
}
}
msg, err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", subFile).Output()
if err != nil || msg == "" {
err = fmt.Errorf("ERROR applying subscription %v: %v, %v", subFile, msg, err)
return err
}
}
// Block until the subscription reports a fully installed CSV.
_, err = subscriptionIsFinished(oc, sub)
return err
} | kata | |||
function | openshift/openshift-tests-private | b0789050-fea7-4334-bd08-32e3a925587f | ensureFeatureGateIsApplied | ['"fmt"', '"strings"'] | ['SubscriptionDescription'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func ensureFeatureGateIsApplied(oc *exutil.CLI, sub SubscriptionDescription, featureGatesFile string) (err error) {
// Only apply the feature-gates configmap when osc-feature-gates is missing.
msg, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm", "osc-feature-gates", "-n", sub.namespace, "--no-headers").Output()
if strings.Contains(msg, "Error from server (NotFound)") {
msg, err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", featureGatesFile).Output()
// NOTE(review): "already exists exit" looks like a typo'd match string
// ("already exists" expected) — confirm against real oc output.
if err != nil && !strings.Contains(msg, "already exists exit") {
err = fmt.Errorf("featureGates cm issue %v %v", msg, err)
}
}
return err
} | kata | |||
function | openshift/openshift-tests-private | 2b39471c-2557-4b7a-9c3a-25551d5773d1 | ensureTrusteeKbsServiceRouteExists | ['"io"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func ensureTrusteeKbsServiceRouteExists(oc *exutil.CLI, namespace, routeType, routeName string) (err error) {
var (
msg string
)
// Reuse an existing route when present.
msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("route", routeName, "-n", namespace, "--no-headers").Output()
if err == nil && strings.Contains(msg, routeName) {
return nil
}
// Otherwise expose the kbs-port of the service as a route of the requested type.
if strings.Contains(msg, "(NotFound)") {
msg, err = oc.AsAdmin().WithoutNamespace().Run("create").Args("route", routeType, "--service="+routeName, "--port", "kbs-port", "-n", namespace).Output()
// Either a fresh create or a concurrent AlreadyExists counts as success.
if strings.Contains(msg, "route.route.openshift.io/"+routeName+" created") || strings.Contains(msg, "(AlreadyExists)") {
return nil
}
}
return err
} | kata | ||||
function | openshift/openshift-tests-private | e3c64349-240e-47bd-9f05-894ec2363caf | ensureTrusteeUrlReturnIsValid | ['"encoding/json"', '"fmt"', '"io"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func ensureTrusteeUrlReturnIsValid(oc *exutil.CLI, kbsClientTemplate, trusteeUrl, correctAnswer, trusteeNamespace string) (err error) {
var (
podName = "kbs-client"
kbsClientImage = "quay.io/confidential-containers/kbs-client:v0.9.0"
phase = "Running"
outputFromOc string
namespace = "default"
)
// make sure the trustee deployment pod is ready
trusteeDeploymentPod, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", trusteeNamespace,
"-l", "app=kbs", "-o=jsonpath={.items[0].metadata.name}").Output()
outputFromOc, err = checkResourceJsonpath(oc, "pod", trusteeDeploymentPod, trusteeNamespace, "-o=jsonpath={.status.phase}", phase, podSnooze*time.Second, 10*time.Second)
if outputFromOc == "" || err != nil {
return fmt.Errorf("Could not get pod (%v) status %v: %v %v", trusteeDeploymentPod, phase, outputFromOc, err)
}
// Spin up a throwaway kbs-client pod in the default namespace to query trustee.
kbsClientFile, err := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f",
kbsClientTemplate, "-p", "NAME="+podName, "IMAGE="+kbsClientImage).OutputToFile(getRandomString() + "kbsClientFile.json")
if kbsClientFile == "" {
return fmt.Errorf("Did not get a filename when processing %v: err:%v", kbsClientTemplate, err)
}
defer deleteKataResource(oc, "pod", namespace, podName)
outputFromOc, err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", kbsClientFile, "-n", namespace).Output()
if err != nil {
e2e.Logf("WARNING: creating kbs-client %v err: %v", outputFromOc, err)
}
outputFromOc, err = checkResourceJsonpath(oc, "pod", podName, namespace, "-o=jsonpath={.status.phase}", phase, podSnooze*time.Second, 10*time.Second)
if err != nil {
return fmt.Errorf("Could not get pod (%v) status %v: %v err: %v", podName, phase, outputFromOc, err)
}
// Fetch default/kbsres1/key1 and compare with the base64 of "res1val1"
// (the value seeded into the kbsres1 secret by configureTrustee).
kbsAnswer, err := oc.AsAdmin().Run("rsh").Args("-T", "-n", namespace,
podName, "kbs-client", "--url", trusteeUrl, "get-resource", "--path", "default/kbsres1/key1").Output()
if err != nil || kbsAnswer != "cmVzMXZhbDE=" {
return fmt.Errorf("Could not query trustee at %v. %v err %v", trusteeUrl, kbsAnswer, err)
}
return err
} | kata | ||||
function | openshift/openshift-tests-private | e08ce911-c3f9-49e6-b2a6-757d72fd2b31 | ensureTrusteeIsInstalled | ['"fmt"'] | ['SubscriptionDescription'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func ensureTrusteeIsInstalled(oc *exutil.CLI, subscription SubscriptionDescription, namespaceTemplate, ogTemplate, subTemplate string) (trusteeRouteHost string, err error) {
// Install namespace, operatorgroup, and subscription in order; each step is
// idempotent, so re-runs are safe.
err = ensureNamespaceIsInstalled(oc, subscription.namespace, namespaceTemplate)
if err != nil {
return trusteeRouteHost, err
}
err = ensureOperatorGroupIsInstalled(oc, subscription.namespace, ogTemplate)
if err != nil {
return trusteeRouteHost, err
}
err = ensureOperatorIsSubscribed(oc, subscription, subTemplate)
if err != nil {
return trusteeRouteHost, err
}
// Expose the kbs-service via an edge route and report its hostname.
trusteeRouteName := "kbs-service"
err = ensureTrusteeKbsServiceRouteExists(oc, subscription.namespace, "edge", trusteeRouteName)
if err != nil {
return trusteeRouteHost, err
}
trusteeRouteHost, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("route", trusteeRouteName, "-o=jsonpath={.spec.host}", "-n", subscription.namespace).Output()
if trusteeRouteHost == "" {
err = fmt.Errorf("trusteeRouteHost was empty. err %v", err)
}
return trusteeRouteHost, err
} | kata | |||
function | openshift/openshift-tests-private | 59d67e61-d2a2-47e7-8248-311faadc0244 | ensureConfigmapIsApplied | ['"fmt"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func ensureConfigmapIsApplied(oc *exutil.CLI, namespace, configmapFile string) (err error) {
// Apply the configmap manifest; an "already exists" style failure is tolerated.
msg, err := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", configmapFile, "-n", namespace).Output()
// NOTE(review): "already exists exit" mirrors the match string used in
// ensureFeatureGateIsApplied and looks typo'd — confirm against real oc output.
if err != nil && !strings.Contains(msg, "already exists exit") {
err = fmt.Errorf("configmap %v file issue %v %v", configmapFile, msg, err)
}
return err
} | kata | ||||
function | openshift/openshift-tests-private | dd0d6235-0cdb-4915-8841-d935f7821753 | ensureKataconfigIsCreated | ['"encoding/json"', '"os"', '"strconv"', '"strings"', '"time"'] | ['SubscriptionDescription', 'KataconfigDescription'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func ensureKataconfigIsCreated(oc *exutil.CLI, kataconf KataconfigDescription, sub SubscriptionDescription) (msg string, err error) {
// If this is used, label the caller with [Disruptive][Serial][Slow]
// If kataconfig already exists, this must not error
var (
configFile string
)
_, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("kataconfig", kataconf.name, "--no-headers", "-n", sub.namespace).Output()
if err == nil {
// kataconfig exists. Is it finished?
msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("kataconfig", kataconf.name, "-n", sub.namespace, kataconfigStatusQuery).Output()
// A "false" in-progress status means installation already completed.
if strings.ToLower(msg) == "false" {
g.By("(3) kataconfig is previously installed")
return msg, err // no need to go through the rest
}
}
g.By("(3) Make sure subscription has finished before kataconfig")
msg, err = subscriptionIsFinished(oc, sub)
if err != nil {
e2e.Logf("The subscription has not finished: %v %v", msg, err)
}
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(msg).NotTo(o.BeEmpty())
g.By("(3.1) Create kataconfig file")
// Render the kataconfig template with the requested name/log level/peer-pod
// and eligibility flags.
configFile, err = oc.AsAdmin().WithoutNamespace().Run("process").Args("--ignore-unknown-parameters=true", "-f", kataconf.template,
"-p", "NAME="+kataconf.name, "LOGLEVEL="+kataconf.logLevel, "PEERPODS="+strconv.FormatBool(kataconf.enablePeerPods), "ELIGIBILITY="+strconv.FormatBool(kataconf.eligibility),
"-n", sub.namespace).OutputToFile(getRandomString() + "kataconfig-common.json")
if err != nil || configFile == "" {
_, configFileExists := os.Stat(configFile)
if configFileExists != nil {
e2e.Logf("issue creating kataconfig file is %s, %v", configFile, err)
}
}
msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("service", "controller-manager-service", "-n", sub.namespace).Output()
e2e.Logf("Controller-manager-service: %v %v", msg, err)
g.By("(3.2) Apply kataconfig file")
// -o=jsonpath={.status.installationStatus.IsInProgress} "" at this point
msg, err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", configFile).Output()
if err != nil {
e2e.Logf("Error: applying kataconfig %v failed: %v %v", configFile, msg, err)
}
// If it is already applied by a parallel test there will be an err
g.By("(3.3) Check kataconfig creation has started")
_, _ = checkResourceExists(oc, "kataconfig", kataconf.name, sub.namespace, snooze*time.Second, 10*time.Second)
g.By("(3.4) Wait for kataconfig to finish install")
// Installing/deleting kataconfig reboots nodes. AWS BM takes 20 minutes/node
msg, err = waitForKataconfig(oc, kataconf.name, sub.namespace)
return msg, err
} | kata | |||
function | openshift/openshift-tests-private | bf9cf7be-0e30-4aa6-bbe8-29722fa23b04 | createKataPodAnnotated | ['"encoding/json"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func createKataPodAnnotated(oc *exutil.CLI, podNs, template, basePodName, runtimeClassName, workloadImage string, annotations map[string]string) (msg string, err error) {
var (
newPodName string
configFile string
phase = "Running"
)
// Randomize the pod name so parallel tests do not collide.
newPodName = getRandomString() + basePodName
// Render the pod template, injecting the annotation-driven resource values
// (MEMORY/CPU/INSTANCESIZE) alongside the runtime class and image.
configFile, err = oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", template, "-p", "NAME="+newPodName,
"-p", "MEMORY="+annotations["MEMORY"], "-p", "CPU="+annotations["CPU"], "-p",
"INSTANCESIZE="+annotations["INSTANCESIZE"], "-p", "RUNTIMECLASSNAME="+runtimeClassName, "IMAGE="+workloadImage).OutputToFile(getRandomString() + "Pod-common.json")
o.Expect(err).NotTo(o.HaveOccurred())
return createKataPodFromTemplate(oc, podNs, newPodName, configFile, runtimeClassName, phase)
} | kata | ||||
function | openshift/openshift-tests-private | f868b0fc-2420-4a62-9d10-5d28f13e5e14 | createKataPodFromTemplate | ['"fmt"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func createKataPodFromTemplate(oc *exutil.CLI, podNs, newPodName, configFile, runtimeClassName, phase string) (msg string, err error) {
// Apply the rendered pod manifest.
msg, err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", configFile, "-n", podNs).Output()
if msg == "" || err != nil {
return msg, fmt.Errorf("Could not apply configFile %v: %v %v", configFile, msg, err)
}
g.By(fmt.Sprintf("Checking if pod %v is ready", newPodName))
// Wait for the pod to reach the requested phase (normally Running).
msg, err = checkResourceJsonpath(oc, "pod", newPodName, podNs, "-o=jsonpath={.status.phase}", phase, podSnooze*time.Second, 10*time.Second)
if msg == "" || err != nil {
return msg, fmt.Errorf("Could not get pod (%v) status %v: %v %v", newPodName, phase, msg, err)
}
// Confirm the scheduler honored the requested runtime class.
msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", newPodName, "-n", podNs, "-o=jsonpath={.spec.runtimeClassName}").Output()
if msg != runtimeClassName || err != nil {
err = fmt.Errorf("pod %v has wrong runtime %v, expecting %v %v", newPodName, msg, runtimeClassName, err)
}
return newPodName, err
} | kata | ||||
function | openshift/openshift-tests-private | 12f3be31-ac5f-45a1-b689-29021eb70093 | createKataPod | ['"encoding/json"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func createKataPod(oc *exutil.CLI, podNs, commonPod, basePodName, runtimeClassName, workloadImage string) string {
var (
err error
newPodName string
configFile string
phase = "Running"
)
// Randomize the pod name so parallel tests do not collide.
newPodName = getRandomString() + basePodName
configFile, err = oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", commonPod, "-p",
"NAME="+newPodName, "-p", "RUNTIMECLASSNAME="+runtimeClassName, "-p", "IMAGE="+workloadImage).OutputToFile(getRandomString() + "Pod-common.json")
o.Expect(err).NotTo(o.HaveOccurred())
// Unlike createKataPodAnnotated, failures here fail the test immediately.
podname, err := createKataPodFromTemplate(oc, podNs, newPodName, configFile, runtimeClassName, phase)
o.Expect(err).NotTo(o.HaveOccurred())
return podname
} | kata | ||||
function | openshift/openshift-tests-private | ddb944b5-3cfe-4447-9829-d7c37b16fdc5 | deleteKataResource | ['"time"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func deleteKataResource(oc *exutil.CLI, res, resNs, resName string) bool {
// Delete the resource with the standard pod timeout; report success as a bool
// rather than surfacing the error.
_, err := deleteResource(oc, res, resName, resNs, podSnooze*time.Second, 10*time.Second)
if err != nil {
return false
}
return true
} | kata | ||||
function | openshift/openshift-tests-private | 41a64da3-56c0-4351-bbcd-92d7be7cac89 | getRandomString | ['"math/rand"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func getRandomString() string {
// Build an 8-character lowercase alphanumeric suffix from a time-seeded
// source; used to make resource/file names unique, not for security.
chars := "abcdefghijklmnopqrstuvwxyz0123456789"
seed := rand.New(rand.NewSource(time.Now().UnixNano()))
buffer := make([]byte, 8)
for index := range buffer {
buffer[index] = chars[seed.Intn(len(chars))]
}
return string(buffer)
} | kata | ||||
function | openshift/openshift-tests-private | 53cedfeb-31e3-486e-af52-735b009afb39 | deleteKataConfig | ['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func deleteKataConfig(oc *exutil.CLI, kcName string) (msg string, err error) {
g.By("(4.1) Trigger kataconfig deletion")
msg, err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("kataconfig", kcName).Output()
if err != nil || msg == "" {
e2e.Logf("Unexpected error while trying to delete kataconfig: %v\nerror: %v", msg, err)
}
//SNO could become unavailable while restarting
//o.Expect(err).NotTo(o.HaveOccurred())
g.By("(4.2) Wait for kataconfig to be deleted")
// Deletion reboots worker nodes, hence the long kataSnooze budget.
errCheck := wait.Poll(30*time.Second, kataSnooze*time.Second, func() (bool, error) {
msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("kataconfig").Output()
if strings.Contains(msg, "No resources found") {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("kataconfig %v did not get deleted: %v %v", kcName, msg, err))
g.By("(4.3) kataconfig is gone")
return msg, err
} | kata | ||||
function | openshift/openshift-tests-private | 4458d8e4-0677-4f45-97bb-da250e15127a | checkKataconfigIsCreated | ['"fmt"', '"strings"', '"github.com/tidwall/gjson"'] | ['SubscriptionDescription'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func checkKataconfigIsCreated(oc *exutil.CLI, sub SubscriptionDescription, kcName string) (err error) {
jsonSubStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", sub.subName, "-n", sub.namespace, "-o=jsonpath={.status}").Output()
if err != nil || gjson.Get(jsonSubStatus, "state").String() != "AtLatestKnown" {
return fmt.Errorf("issue with subscription or state isn't expected: %v, actual: %v error: %v", "AtLatestKnown", jsonSubStatus, err)
}
if !strings.Contains(gjson.Get(jsonSubStatus, "installedCSV").String(), sub.subName) {
return fmt.Errorf("Error: get installedCSV for subscription %v %v", jsonSubStatus, err)
}
csvName := gjson.Get(jsonSubStatus, "installedCSV").String()
jsonCsvStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("csv", csvName, "-n", sub.namespace, "-o=jsonpath={.status}").Output()
if err != nil ||
gjson.Get(jsonCsvStatus, "phase").String() != "Succeeded" ||
gjson.Get(jsonCsvStatus, "reason").String() != "InstallSucceeded" {
return fmt.Errorf("Error: CSV %v in wrong state, expected: %v actual:\n%v %v", csvName, "InstallSucceeded", jsonCsvStatus, err)
}
// check kataconfig
msg, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("kataconfig", kcName, "-n", sub.namespace, kataconfigStatusQuery).Output()
if err == nil && strings.ToLower(msg) == "false" {
return nil
}
return fmt.Errorf("Error: Kataconfig in wrong state, expected: false actual: %v error: %v", msg, err)
} | kata | |||
function | openshift/openshift-tests-private | 4a2cb1aa-9d94-4517-a8e5-83fc3d59a957 | subscriptionIsFinished | ['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | ['SubscriptionDescription'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func subscriptionIsFinished(oc *exutil.CLI, sub SubscriptionDescription) (msg string, err error) {
var (
csvName string
controlPod string
)
g.By("(2) Subscription checking")
msg, _ = checkResourceJsonpath(oc, "sub", sub.subName, sub.namespace, "-o=jsonpath={.status.state}", "AtLatestKnown", snooze*time.Second, 10*time.Second)
csvName, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", sub.subName, "-n", sub.namespace, "-o=jsonpath={.status.installedCSV}").Output()
if err != nil || csvName == "" {
e2e.Logf("ERROR: cannot get sub %v installedCSV %v %v", sub.subName, csvName, err)
}
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(csvName).NotTo(o.BeEmpty())
g.By("(2.1) Check that the csv '" + csvName + "' has finished")
msg, err = checkResourceJsonpath(oc, "csv", csvName, sub.namespace, "-o=jsonpath={.status.phase}{.status.reason}", "SucceededInstallSucceeded", snooze*time.Second, 10*time.Second)
// need controller-manager-service and controller-manager-* pod running before kataconfig
// oc get pod -o=jsonpath={.items..metadata.name} && find one w/ controller-manager
g.By("(2.2) Wait for controller manager pod to start")
// checkResourceJsonpath() needs exact pod name. control-manager deploy does not have full name
errCheck := wait.PollImmediate(10*time.Second, podSnooze*time.Second, func() (bool, error) {
msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-o=jsonpath={.items..metadata.name}", "-n", sub.namespace).Output()
if strings.Contains(msg, "controller-manager") {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Controller manger pods did not start %v %v", msg, err))
// what is the pod name?
for _, controlPod = range strings.Fields(msg) {
if strings.Contains(controlPod, "controller-manager") {
break // no need to check the rest
}
}
// controller-podname -o=jsonpath={.status.containerStatuses} && !strings.Contains("false")
g.By("(2.3) Check that " + controlPod + " is ready")
// this checks that the 2 containers in the pod are not showing false. checkResourceJsonpath() cannot be used
errCheck = wait.PollImmediate(10*time.Second, podSnooze*time.Second, func() (bool, error) {
msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", controlPod, "-o=jsonpath={.status.containerStatuses}", "-n", sub.namespace).Output()
if !strings.Contains(strings.ToLower(msg), "false") {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("control pod %v did not become ready: %v %v", controlPod, msg, err))
msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", sub.subName, "-n", sub.namespace, "--no-headers").Output()
return msg, err
} | kata | |||
function | openshift/openshift-tests-private | 7c2cc83d-b718-4f2c-adb6-41defde1fa3f | waitForNodesInDebug | ['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func waitForNodesInDebug(oc *exutil.CLI, opNamespace string) (msg string, err error) {
count := 0
workerNodeList, err := exutil.GetClusterNodesBy(oc, "worker")
o.Expect(err).NotTo(o.HaveOccurred())
workerNodeCount := len(workerNodeList)
if workerNodeCount < 1 {
e2e.Logf("Error: no worker nodes: %v, %v", workerNodeList, err)
}
o.Expect(workerNodeList).NotTo(o.BeEmpty())
//e2e.Logf("Waiting for %v nodes to enter debug: %v", workerNodeCount, workerNodeList)
// loop all workers until they all have debug
errCheck := wait.Poll(10*time.Second, snooze*time.Second, func() (bool, error) {
count = 0
for index := range workerNodeList {
msg, err = oc.AsAdmin().Run("debug").Args("-n", opNamespace, "node/"+workerNodeList[index], "--", "chroot", "/host", "crio", "config").Output()
if strings.Contains(msg, "log_level = \"debug") {
count++
o.Expect(msg).To(o.ContainSubstring("log_level = \"debug"))
}
}
if count == workerNodeCount {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Error: only %v of %v total worker nodes are in debug: %v\n %v", count, workerNodeCount, workerNodeList, msg))
msg = fmt.Sprintf("All %v worker nodes are in debug mode: %v", workerNodeCount, workerNodeList)
err = nil
return msg, err
} | kata | ||||
function | openshift/openshift-tests-private | a0ac3a77-edf9-463b-a56d-824f3cecc0fa | applyImageRedirect | ['"fmt"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func applyImageRedirect(oc *exutil.CLI, redirectFile, redirectType, redirectName string) error {
msg, err := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", redirectFile).Output()
if err != nil {
return fmt.Errorf("ERROR applying %v: %v %v", redirectType, msg, err)
}
_, err = checkResourceExists(oc, redirectType, redirectName, "default", 360*time.Second, 10*time.Second)
return err
} | kata | ||||
function | openshift/openshift-tests-private | 67c1cd96-708a-4bce-91f7-6a149a1c8c48 | waitForDeployment | ['"fmt"', '"time"', '"github.com/tidwall/gjson"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func waitForDeployment(oc *exutil.CLI, podNs, deployName string) (msg string, err error) {
var (
snooze time.Duration = 300
replicas string
)
replicas, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deploy", "-n", podNs, deployName, "-o=jsonpath={.spec.replicas}").Output()
if err != nil {
e2e.Logf("replica fetch failed %v %v", replicas, err)
}
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(replicas).NotTo(o.BeEmpty())
errCheck := wait.Poll(10*time.Second, snooze*time.Second, func() (bool, error) {
msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deploy", "-n", podNs, deployName, "-o=jsonpath={.status.readyReplicas}").Output()
if msg == replicas {
return true, nil
}
return false, nil
})
if errCheck != nil {
msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("deploy", "-n", podNs, deployName, "-o=jsonpath={.status}").Output()
e2e.Logf("timed out %v != %v %v", replicas, msg, err)
msg = gjson.Get(msg, "readyReplicas").String()
}
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Deployment has %v replicas, not %v %v", replicas, msg, err))
return msg, err
} | kata | ||||
function | openshift/openshift-tests-private | 9ec92ffd-d71f-4b48-8da4-dcafb41ceab0 | deleteDeployment | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func deleteDeployment(oc *exutil.CLI, deployNs, deployName string) bool {
return deleteKataResource(oc, "deploy", deployNs, deployName)
} | kata | |||||
function | openshift/openshift-tests-private | f100a09d-b792-4279-9aee-9ac1ac9f7162 | getClusterVersion | ['"encoding/json"', '"strconv"', '"strings"', '"github.com/tidwall/gjson"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func getClusterVersion(oc *exutil.CLI) (clusterVersion, ocpMajorVer, ocpMinorVer string, minorVer int) {
jsonVersion, err := oc.AsAdmin().WithoutNamespace().Run("version").Args("-o", "json").Output()
if err != nil || jsonVersion == "" || !gjson.Get(jsonVersion, "openshiftVersion").Exists() {
e2e.Logf("Error: could not get oc version: %v %v", jsonVersion, err)
}
clusterVersion = gjson.Get(jsonVersion, "openshiftVersion").String()
sa := strings.Split(clusterVersion, ".")
ocpMajorVer = sa[0]
ocpMinorVer = sa[1]
minorVer, _ = strconv.Atoi(ocpMinorVer)
return clusterVersion, ocpMajorVer, ocpMinorVer, minorVer
} | kata | ||||
function | openshift/openshift-tests-private | 5ce90d7d-81a2-4bd8-843f-284135e2f996 | waitForKataconfig | ['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func waitForKataconfig(oc *exutil.CLI, kcName, opNamespace string) (msg string, err error) {
// Installing/deleting kataconfig reboots nodes. AWS BM takes 20 minutes/node
errCheck := wait.Poll(30*time.Second, kataSnooze*time.Second, func() (bool, error) {
msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("kataconfig", kcName, "-n", opNamespace, kataconfigStatusQuery).Output()
if strings.ToLower(msg) == "false" {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("kataconfig %v did not finish install", kcName))
msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("kataconfig", kcName, "--no-headers").Output()
msg = "SUCCESS kataconfig is created " + msg
return msg, err
} | kata | ||||
function | openshift/openshift-tests-private | f386a5f3-13c8-4058-ac12-d21569b2a537 | changeSubscriptionCatalog | ['"fmt"'] | ['SubscriptionDescription', 'TestRunDescription'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func changeSubscriptionCatalog(oc *exutil.CLI, subscription SubscriptionDescription, testrun TestRunDescription) (msg string, err error) {
// check for catsrc existence before calling
patch := fmt.Sprintf("{\"spec\":{\"source\":\"%v\"}}", testrun.catalogSourceName)
msg, err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("sub", subscription.subName, "--type", "merge", "-p", patch, "-n", subscription.namespace).Output()
return msg, err
} | kata | |||
function | openshift/openshift-tests-private | ea35f486-a8ed-49b5-bd27-8d7c57926977 | changeSubscriptionChannel | ['"fmt"'] | ['SubscriptionDescription', 'TestRunDescription'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func changeSubscriptionChannel(oc *exutil.CLI, subscription SubscriptionDescription, testrun TestRunDescription) (msg string, err error) {
patch := fmt.Sprintf("{\"spec\":{\"channel\":\"%v\"}}", testrun.channel)
msg, err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("sub", subscription.subName, "--type", "merge", "-p", patch, "-n", subscription.namespace).Output()
return msg, err
} | kata | |||
function | openshift/openshift-tests-private | 5f30bb35-5dec-4e03-b49e-eca6efc90d88 | logErrorAndFail | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func logErrorAndFail(oc *exutil.CLI, logMsg, msg string, err error) {
e2e.Logf("%v: %v %v", logMsg, msg, err)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(msg).NotTo(o.BeEmpty())
} | kata | |||||
function | openshift/openshift-tests-private | 5ed6362c-c853-4343-90f4-e0529c0ced16 | checkAndLabelCustomNodes | ['TestRunDescription'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func checkAndLabelCustomNodes(oc *exutil.CLI, testrun TestRunDescription) {
e2e.Logf("check and label nodes (or single node for custom label)")
nodeCustomList := exutil.GetNodeListByLabel(oc, customLabel)
if len(nodeCustomList) > 0 {
e2e.Logf("labeled nodes found %v", nodeCustomList)
} else {
if testrun.labelSingleNode {
node, err := exutil.GetFirstWorkerNode(oc)
o.Expect(err).NotTo(o.HaveOccurred())
LabelNode(oc, node, customLabel)
} else {
labelSelectedNodes(oc, workerLabel, customLabel)
}
}
} | kata | ||||
function | openshift/openshift-tests-private | e9a8dc0e-2ba2-42f9-b1e3-6ebaa9816846 | labelEligibleNodes | ['TestRunDescription'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func labelEligibleNodes(oc *exutil.CLI, testrun TestRunDescription) {
e2e.Logf("Label worker nodes for eligibility feature")
if testrun.eligibleSingleNode {
node, err := exutil.GetFirstWorkerNode(oc)
o.Expect(err).NotTo(o.HaveOccurred())
LabelNode(oc, node, featureLabel)
} else {
labelSelectedNodes(oc, workerLabel, featureLabel)
}
} | kata | ||||
function | openshift/openshift-tests-private | dd31767c-a44b-431a-b3ed-ce836a949c42 | labelSelectedNodes | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func labelSelectedNodes(oc *exutil.CLI, selectorLabel, customLabel string) {
nodeList := exutil.GetNodeListByLabel(oc, selectorLabel)
if len(nodeList) > 0 {
for _, node := range nodeList {
LabelNode(oc, node, customLabel)
}
}
} | kata | |||||
function | openshift/openshift-tests-private | e2172496-1340-4081-a651-2cd5119a4db9 | LabelNode | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func LabelNode(oc *exutil.CLI, node, customLabel string) {
msg, err := oc.AsAdmin().WithoutNamespace().Run("label").Args("node", node, customLabel).Output()
e2e.Logf("%v applied and output was: %v %v", customLabel, msg, err)
o.Expect(err).NotTo(o.HaveOccurred())
} | kata | |||||
function | openshift/openshift-tests-private | 1ddfddb1-be31-4a1e-a1db-d06e20ac5420 | getInstancesOnNode | ['"fmt"', '"strconv"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func getInstancesOnNode(oc *exutil.CLI, opNamespace, node string) (instances int, err error) {
cmd := fmt.Sprintf("ps -ef | grep uuid | grep -v grep | wc -l")
msg, err := exutil.DebugNodeWithOptionsAndChroot(oc, node, []string{"-q"}, "bin/sh", "-c", cmd)
o.Expect(err).NotTo(o.HaveOccurred())
instances, err = strconv.Atoi(strings.TrimSpace(msg))
if err != nil {
instances = 0
}
return instances, err
} | kata | ||||
function | openshift/openshift-tests-private | 505552a8-a1d7-4d6c-be8e-a856e4e9ae7b | getTotalInstancesOnNodes | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func getTotalInstancesOnNodes(oc *exutil.CLI, opNamespace string, nodeList []string) (total int) {
total = 0
count := 0
for _, node := range nodeList {
count, _ = getInstancesOnNode(oc, opNamespace, node)
e2e.Logf("found %v VMs on node %v", count, node)
total += count
}
e2e.Logf("Total %v VMs on all nodes", total)
return total
} | kata | |||||
function | openshift/openshift-tests-private | 3305192b-68c0-44f4-b8fd-52f2efa7548b | getAllKataNodes | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func getAllKataNodes(oc *exutil.CLI, eligibility bool, opNamespace, featureLabel, customLabel string) (nodeNameList []string) {
actLabel := customLabel
if eligibility {
actLabel = featureLabel
}
return exutil.GetNodeListByLabel(oc, actLabel)
} | kata | |||||
function | openshift/openshift-tests-private | 2211af76-1e56-477f-b4aa-162598f160fd | getHttpResponse | ['"fmt"', '"io"', '"net/http"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func getHttpResponse(url string, expStatusCode int) (resp string, err error) {
resp = ""
res, err := http.Get(url)
if err == nil {
defer res.Body.Close()
if res.StatusCode != expStatusCode {
err = fmt.Errorf("Response from url=%v\n actual status code=%d doesn't match expected %d\n", url, res.StatusCode, expStatusCode)
} else {
body, err := io.ReadAll(res.Body)
if err == nil {
resp = string(body)
}
}
}
return resp, err
} | kata | ||||
function | openshift/openshift-tests-private | f855d6d6-92fb-477e-b492-d9d5fbacc05e | createServiceAndRoute | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func createServiceAndRoute(oc *exutil.CLI, deployName, podNs string) (host string, err error) {
msg, err := oc.WithoutNamespace().Run("expose").Args("deployment", deployName, "-n", podNs).Output()
if err != nil {
e2e.Logf("Expose deployment failed with: %v %v", msg, err)
} else {
msg, err = oc.Run("expose").Args("service", deployName, "-n", podNs).Output()
if err != nil {
e2e.Logf("Expose service failed with: %v %v", msg, err)
} else {
host, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("routes", deployName, "-n", podNs, "-o=jsonpath={.spec.host}").Output()
if err != nil || host == "" {
e2e.Logf("Failed to get host from route, actual host=%v\n error %v", host, err)
}
host = strings.Trim(host, "'")
}
}
return host, err
} | kata | ||||
function | openshift/openshift-tests-private | c2c2ad3c-9f9d-4ee6-bece-a7e2843102a3 | deleteRouteAndService | ['"time"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func deleteRouteAndService(oc *exutil.CLI, deployName, podNs string) {
// oc.AsAdmin().WithoutNamespace().Run("delete").Args("svc", "-n", podNs, deployName, "--ignore-not-found").Execute()
// oc.AsAdmin().WithoutNamespace().Run("delete").Args("route", "-n", podNs, deployName, "--ignore-not-found").Execute()
_, _ = deleteResource(oc, "svc", deployName, podNs, podSnooze*time.Second, 10*time.Second)
_, _ = deleteResource(oc, "route", deployName, podNs, podSnooze*time.Second, 10*time.Second)
} | kata | ||||
function | openshift/openshift-tests-private | 3b730fba-fd9c-4949-a4ce-a4afc556c8e8 | checkPeerPodSecrets | ['"fmt"', '"github.com/tidwall/gjson"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func checkPeerPodSecrets(oc *exutil.CLI, opNamespace, provider string, ppSecretName string) (msg string, err error) {
var (
errors = 0
errorList []string
providerVars []string
)
switch provider {
case "azure":
providerVars = append(providerVars, "AZURE_CLIENT_ID", "AZURE_CLIENT_SECRET", "AZURE_SUBSCRIPTION_ID", "AZURE_TENANT_ID")
case "aws":
providerVars = append(providerVars, "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY")
case "libvirt":
providerVars = append(providerVars, "LIBVIRT_URI", "LIBVIRT_POOL", "LIBVIRT_VOL_NAME")
default:
msg = fmt.Sprintf("Cloud provider %v is not supported", provider)
err = fmt.Errorf("%v", msg)
return msg, err
}
jsonData, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secrets", ppSecretName, "-n", opNamespace, "-o=jsonpath={.data}").Output()
if err != nil {
msg = fmt.Sprintf("Secret for %v not exists", provider)
err = fmt.Errorf("%v", msg)
return msg, err
}
for index := range providerVars {
if !gjson.Get(jsonData, providerVars[index]).Exists() || gjson.Get(jsonData, providerVars[index]).String() == "" {
errors++
errorList = append(errorList, providerVars[index])
}
}
msg = ""
if errors != 0 {
msg = fmt.Sprintf("ERROR missing vars in secret %v %v", errors, errorList)
err = fmt.Errorf("%v", msg)
}
return msg, err
} | kata | ||||
function | openshift/openshift-tests-private | 3cdeb111-62ad-4d17-92dd-7497cd7a8474 | decodeSecret | ['"encoding/base64"', '"fmt"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func decodeSecret(input string) (msg string, err error) {
debase64, err := base64.StdEncoding.DecodeString(input)
if err != nil {
msg = fmt.Sprintf("Was not able to decode %v. %v %v", input, debase64, err)
} else {
msg = fmt.Sprintf("%s", debase64)
}
return msg, err
} | kata | ||||
function | openshift/openshift-tests-private | 1536f8b3-cb19-48d1-9725-d31be9c300de | checkPeerPodConfigMap | ['"fmt"', '"github.com/tidwall/gjson"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func checkPeerPodConfigMap(oc *exutil.CLI, opNamespace, provider, ppConfigMapName string) (msg string, err error) {
var (
errors = 0
errorList []string
providerVars []string
)
switch provider {
case "azure":
providerVars = append(providerVars, "CLOUD_PROVIDER", "AZURE_NSG_ID", "AZURE_SUBNET_ID", "VXLAN_PORT", "AZURE_REGION", "AZURE_RESOURCE_GROUP")
case "aws":
providerVars = append(providerVars, "CLOUD_PROVIDER", "AWS_REGION", "AWS_SG_IDS", "AWS_SUBNET_ID", "AWS_VPC_ID", "VXLAN_PORT")
case "libvirt":
providerVars = append(providerVars, "CLOUD_PROVIDER")
default:
msg = fmt.Sprintf("Cloud provider %v is not supported", provider)
err = fmt.Errorf("%v", msg)
return msg, err
}
jsonData, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm", ppConfigMapName, "-n", opNamespace, "-o=jsonpath={.data}").Output()
if err != nil {
msg = fmt.Sprintf("Configmap for %v not exists", provider)
err = fmt.Errorf("%v", msg)
return msg, err
}
for index := range providerVars {
if !gjson.Get(jsonData, providerVars[index]).Exists() || gjson.Get(jsonData, providerVars[index]).String() == "" {
errors++
errorList = append(errorList, providerVars[index])
}
}
msg = ""
if errors != 0 {
msg = fmt.Sprintf("ERROR missing vars in configmap %v %v", errors, errorList)
err = fmt.Errorf("%v", msg)
}
return msg, err
} | kata | ||||
function | openshift/openshift-tests-private | dd949155-7ed7-4d6b-af8c-d7d0bd3479f7 | checkPeerPodControl | ['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func checkPeerPodControl(oc *exutil.CLI, opNamespace, expStatus string) (msg string, err error) {
// This would check peer pod webhook pod , peerpodconfig-ctrl-caa pods , webhook service and endpoints attached to the svc
//TODO: should add podvm image builder pod completed?
var (
peerpodconfigCtrlCaaPods []string
webhookPods []string
webhooksvc = "peer-pods-webhook-svc"
)
g.By("Check for peer pods webhook pod")
// checkResourceJsonpath needs a pod name
errCheck := wait.PollImmediate(10*time.Second, podSnooze*time.Second, func() (bool, error) {
msg, err := oc.AsAdmin().Run("get").Args("pod", "-o=jsonpath={.items..metadata.name}", "-n", opNamespace).Output()
if err != nil {
return false, err
}
if strings.Contains(msg, "peer-pods-webhook") {
return true, nil
}
return false, nil
})
if err != nil || msg == "" || errCheck != nil {
e2e.Logf(" %v %v, %v", msg, err, errCheck)
}
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("peer pod webhook pod did not start: %v", errCheck))
//webhook pod names
msg, err = oc.AsAdmin().Run("get").Args("pod", "-o=jsonpath={.items..metadata.name}", "-n", opNamespace).Output()
for _, whPod := range strings.Fields(msg) {
if strings.Contains(whPod, "peer-pods-webhook") {
webhookPods = append(webhookPods, whPod)
}
}
//count check
whPodCount := len(webhookPods)
if whPodCount != 2 {
e2e.Logf("There should be two webhook pods, instead there are: %v", whPodCount)
return
}
//pod state check
for _, podName := range webhookPods {
checkControlPod(oc, podName, opNamespace, expStatus)
}
g.By("Check for peer pods ctrl caa pod")
// checkResourceJsonpath needs a podname
errCheck = wait.PollImmediate(10*time.Second, podSnooze*time.Second, func() (bool, error) {
msg, err = oc.AsAdmin().Run("get").Args("pod", "-o=jsonpath={.items..metadata.name}", "-n", opNamespace).Output()
if strings.Contains(msg, "peerpodconfig-ctrl-caa-daemon") {
return true, nil
}
return false, nil
})
if err != nil || msg == "" || errCheck != nil {
e2e.Logf(" %v %v, %v", msg, err, errCheck)
}
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("peer pod ctrl caa pod did not start %v %v", msg, err))
//peerpodconfig ctrl CAA pod names
msg, err = oc.AsAdmin().Run("get").Args("pod", "-o=jsonpath={.items..metadata.name}", "-n", opNamespace).Output()
for _, ppconfigCaaPod := range strings.Fields(msg) {
if strings.Contains(ppconfigCaaPod, "peerpodconfig-ctrl-caa") {
peerpodconfigCtrlCaaPods = append(peerpodconfigCtrlCaaPods, ppconfigCaaPod)
}
}
//pod state check
for _, podName := range peerpodconfigCtrlCaaPods {
checkControlPod(oc, podName, opNamespace, expStatus)
}
//webhook service
checkControlSvc(oc, opNamespace, webhooksvc)
g.By("SUCCESS - peerpod config check passed")
return msg, err
} | kata | ||||
function | openshift/openshift-tests-private | 76dc9999-5a51-4a94-91f5-94f3729b8c1b | checkControlPod | ['"time"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func checkControlPod(oc *exutil.CLI, podName, podNs, expStatus string) (msg string, err error) {
msg, err = checkResourceJsonpath(oc, "pods", podName, podNs, "-o=jsonpath={.status.phase}", expStatus, podSnooze*time.Second, 10*time.Second)
return msg, err
} | kata | ||||
function | openshift/openshift-tests-private | b531aa47-108a-4611-9ff2-8f36ef74a7d2 | checkControlSvc | ['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func checkControlSvc(oc *exutil.CLI, svcNs, svcName string) (msg string, err error) {
g.By("Check for " + svcName + "service")
msg, err = checkResourceJsonpath(oc, "service", svcName, svcNs, "-o=jsonpath={.metadata.name}", svcName, podSnooze*time.Second, 10*time.Second)
g.By("Check for " + svcName + "service endpoints")
// checkResourceJsonpath does strings.Contains not ContainsAny
errCheck := wait.PollImmediate(10*time.Second, podSnooze*time.Second, func() (bool, error) {
msg, err = oc.AsAdmin().Run("get").Args("ep", svcName, "-n", svcNs, "-o=jsonpath={.subsets..addresses..ip}").Output()
if strings.ContainsAny(msg, "0123456789") {
return true, nil
}
return false, nil
})
if err != nil || msg == "" || errCheck != nil {
e2e.Logf(" %v %v, %v", msg, err, errCheck)
}
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("%v does not have endpoints attached to it; err: %v", svcName, err))
g.By("SUCCESS - service check passed")
return msg, err
} | kata | ||||
function | openshift/openshift-tests-private | 407d7701-3b0e-48ec-afc4-d54b0f7a6a87 | checkResourceExists | ['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func checkResourceExists(oc *exutil.CLI, resType, resName, resNs string, duration, interval time.Duration) (msg string, err error) {
// working: pod, deploy, service, route, ep, ds
errCheck := wait.PollImmediate(interval, duration, func() (bool, error) {
msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(resType, resName, "-n", resNs, "--no-headers").Output()
if strings.Contains(msg, resName) {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("%v %v was not found in ns %v after %v sec: %v %v", resType, resName, resNs, duration, msg, err))
return msg, nil
} | kata | ||||
function | openshift/openshift-tests-private | 533ee9fa-cce2-4127-ba91-c70c8e420727 | checkResourceJsonpath | ['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func checkResourceJsonpath(oc *exutil.CLI, resType, resName, resNs, jsonpath, expected string, duration, interval time.Duration) (msg string, err error) {
// resType=pod, -o=jsonpath='{.status.phase}', expected="Running"
// resType=deploy, -o=jsonpath='{.status.conditions[?(@.type=="Available")].status}', expected="True"
// resType=route, -o=jsonpath='{.status.ingress..conditions[?(@.type==\"Admitted\")].status}', expected="True"
// resType=ds, -o=jsonpath='{.status.ingress..conditions[?(@.type==\"Admitted\")].status}'", expected= number of nodes w/ kata-oc
// fmt.Sprintf("%v", len(exutil.GetNodeListByLabel(oc, kataocLabel)))
/* readyReplicas might not exist in .status!
// resType=deploy, -o=jsonpath='{.status.readyReplicas}', expected = spec.replicas
*/
errCheck := wait.PollImmediate(interval, duration, func() (bool, error) {
msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(resType, resName, "-n", resNs, jsonpath).Output()
if strings.Contains(msg, expected) {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("%v %v in ns %v is not in %v state after %v sec: %v %v", resType, resName, resNs, expected, duration, msg, err))
return msg, nil
} | kata | ||||
function | openshift/openshift-tests-private | 162ce2dd-f5d4-4f7a-814b-64afad340066 | deleteResource | ['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func deleteResource(oc *exutil.CLI, res, resName, resNs string, duration, interval time.Duration) (msg string, err error) {
msg, err = oc.AsAdmin().WithoutNamespace().Run("delete").Args(res, resName, "-n", resNs, "--ignore-not-found").Output()
if err != nil {
msg = fmt.Sprintf("ERROR: Cannot start deleting %v %v -n %v: %v %v", res, resName, resNs, msg, err)
e2e.Failf(msg)
}
// make sure it doesn't exist
errCheck := wait.PollImmediate(interval, duration, func() (bool, error) {
msg, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args(res, resName, "-n", resNs, "--no-headers").Output()
if strings.Contains(msg, "not found") {
return true, nil
}
return false, nil
})
if errCheck != nil {
e2e.Logf("ERROR: Timeout waiting for delete to finish on %v %v -n %v: %v", res, resName, resNs, msg)
}
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("%v %v was not finally deleted in ns %v", res, resName, resNs))
msg = fmt.Sprintf("deleted %v %v -n %v: %v %v", res, resName, resNs, msg, err)
err = nil
return msg, err
} | kata | ||||
function | openshift/openshift-tests-private | 316b8c69-7710-4846-81b1-25675fedf6d9 | createApplyPeerPodSecrets | ['"fmt"', '"os"'] | ['PeerpodParam'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func createApplyPeerPodSecrets(oc *exutil.CLI, provider string, ppParam PeerpodParam, opNamespace, ppSecretName, secretTemplate string) (msg string, err error) {
var (
ciCmName = "peerpods-param-cm"
ciSecretName = "peerpods-param-secret"
)
// Check if the secrets already exist
g.By("Checking if peer-pods-secret exists")
msg, err = checkPeerPodSecrets(oc, opNamespace, provider, ppSecretName)
if err == nil && msg == "" {
e2e.Logf("peer-pods-secret exists - skipping creating it")
return msg, err
}
// e2e.Logf("**** peer-pods-secret not found on the cluster - proceeding to create it****")
//Read params from peerpods-param-cm and store in ppParam struct
msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", ciCmName, "-n", "default").Output()
if err != nil {
e2e.Logf("%v Configmap created by QE CI not found: msg %v err: %v", ciCmName, msg, err)
} else {
configmapData, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", ciCmName, "-n", "default", "-o=jsonpath={.data}").Output()
if err != nil {
e2e.Failf("%v Configmap created by QE CI has error, no .data: %v %v", ciCmName, configmapData, err)
}
e2e.Logf("configmap Data is:\n%v", configmapData)
ppParam, err := parseCIPpConfigMapData(provider, configmapData)
if err != nil {
return msg, err
}
var secretFilePath string
if provider == "aws" {
secretFilePath, err = createAWSPeerPodSecrets(oc, ppParam, ciSecretName, secretTemplate)
} else if provider == "azure" {
secretFilePath, err = createAzurePeerPodSecrets(oc, ppParam, ciSecretName, secretTemplate)
} else if provider == "libvirt" {
secretFilePath, err = createLibvirtPeerPodSecrets(oc, ppParam, ciSecretName, secretTemplate)
} else {
msg = fmt.Sprintf("Cloud provider %v is not supported", provider)
return msg, fmt.Errorf("%v", msg)
}
if err != nil {
return msg, err
}
g.By("(Apply peer-pods-secret file)")
msg, err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", secretFilePath).Output()
if err != nil {
e2e.Logf("Error: applying peer-pods-secret %v failed: %v %v", secretFilePath, msg, err)
}
if errRemove := os.Remove(secretFilePath); errRemove != nil {
e2e.Logf("Error: removing secret file %v failed: %v", secretFilePath, errRemove)
}
}
return msg, err
} | kata | |||
function | openshift/openshift-tests-private | 661f18a2-2ef9-441e-8972-1fc4b7cf3ef1 | createApplyPeerPodsParamLibvirtConfigMap | ['"fmt"'] | ['PeerpodParam'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func createApplyPeerPodsParamLibvirtConfigMap(oc *exutil.CLI, provider string, ppParam PeerpodParam, opNamespace, ppConfigMapName, ppConfigMapTemplate string) (msg string, err error) {
var (
ciCmName = "peerpods-param-cm"
configFile string
)
g.By("Checking if libvirt-podvm-image-cm exists")
_, err = checkPeerPodConfigMap(oc, opNamespace, provider, ppConfigMapName)
if err == nil {
e2e.Logf("libvirt-podvm-image-cm exists - skipping creating it")
return msg, err
} else if err != nil {
e2e.Logf("**** libvirt-podvm-image-cm not found on the cluster - proceeding to create it****")
}
// Read params from libvirt-podvm-image-cm and store in ppParam struct
msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", ciCmName, "-n", "default").Output()
if err != nil {
e2e.Logf("%v Configmap created by QE CI not found: msg %v err: %v", ciCmName, msg, err)
} else {
configmapData, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", ciCmName, "-n", "default", "-o=jsonpath={.data}").Output()
if err != nil {
e2e.Failf("%v Configmap created by QE CI has error, no .data: %v %v", ciCmName, configmapData, err)
}
ppParam, err := parseCIPpConfigMapData(provider, configmapData)
if err != nil {
return msg, err
}
// Create libvirt-podvm-image-cm file
if provider == "libvirt" {
configFile, err = createLibvirtPeerPodsParamConfigMap(oc, ppParam, ppConfigMapTemplate)
} else {
msg = fmt.Sprintf("Cloud provider %v is not supported", provider)
return msg, fmt.Errorf("%v", msg)
}
if err != nil {
return msg, err
}
// Apply libvirt-podvm-image-cm file
g.By("(Apply libvirt-podvm-image-cm file)")
msg, err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", configFile).Output()
if err != nil {
return fmt.Sprintf("Error: applying libvirt-podvm-image-cm %v failed: %v %v", configFile, msg, err), err
}
}
return msg, err
} | kata | |||
function | openshift/openshift-tests-private | f5951770-d2fd-435f-817c-df62c50cffad | parseCIPpConfigMapData | ['"fmt"'] | ['PeerpodParam'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func parseCIPpConfigMapData(provider, configmapData string) (PeerpodParam, error) {
var ppParam PeerpodParam
switch provider {
case "aws":
return parseAWSCIConfigMapData(configmapData)
case "azure":
return parseAzureCIConfigMapData(configmapData)
case "libvirt":
return parseLibvirtCIConfigMapData(configmapData)
default:
return ppParam, fmt.Errorf("Cloud provider %v is not supported", provider)
}
} | kata | |||
function | openshift/openshift-tests-private | 7a954a5c-a63e-46b8-8e95-335c40347e1e | parseLibvirtCIConfigMapData | ['"github.com/tidwall/gjson"'] | ['PeerpodParam'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func parseLibvirtCIConfigMapData(configmapData string) (PeerpodParam, error) {
var ppParam PeerpodParam
if gjson.Get(configmapData, "PROXY_TIMEOUT").Exists() {
ppParam.PROXY_TIMEOUT = gjson.Get(configmapData, "PROXY_TIMEOUT").String()
}
if gjson.Get(configmapData, "LIBVIRT_KVM_HOST_ADDRESS").Exists() {
ppParam.LIBVIRT_KVM_HOST_ADDRESS = gjson.Get(configmapData, "LIBVIRT_KVM_HOST_ADDRESS").String()
}
if gjson.Get(configmapData, "PODVM_DISTRO").Exists() {
ppParam.LIBVIRT_PODVM_DISTRO = gjson.Get(configmapData, "PODVM_DISTRO").String()
}
if gjson.Get(configmapData, "CAA_SRC").Exists() {
ppParam.LIBVIRT_CAA_SRC = gjson.Get(configmapData, "CAA_SRC").String()
}
if gjson.Get(configmapData, "CAA_REF").Exists() {
ppParam.LIBVIRT_CAA_REF = gjson.Get(configmapData, "CAA_REF").String()
}
if gjson.Get(configmapData, "DOWNLOAD_SOURCES").Exists() {
ppParam.LIBVIRT_DOWNLOAD_SOURCES = gjson.Get(configmapData, "DOWNLOAD_SOURCES").String()
}
if gjson.Get(configmapData, "CONFIDENTIAL_COMPUTE_ENABLED").Exists() {
ppParam.LIBVIRT_CONFIDENTIAL_COMPUTE_ENABLED = gjson.Get(configmapData, "CONFIDENTIAL_COMPUTE_ENABLED").String()
}
if gjson.Get(configmapData, "UPDATE_PEERPODS_CM").Exists() {
ppParam.LIBVIRT_UPDATE_PEERPODS_CM = gjson.Get(configmapData, "UPDATE_PEERPODS_CM").String()
}
if gjson.Get(configmapData, "ORG_ID").Exists() {
ppParam.LIBVIRT_ORG_ID = gjson.Get(configmapData, "ORG_ID").String()
}
if gjson.Get(configmapData, "BASE_OS_VERSION").Exists() {
ppParam.LIBVIRT_BASE_OS_VERSION = gjson.Get(configmapData, "BASE_OS_VERSION").String()
}
if gjson.Get(configmapData, "IMAGE_NAME").Exists() {
ppParam.LIBVIRT_IMAGE_NAME = gjson.Get(configmapData, "IMAGE_NAME").String()
}
if gjson.Get(configmapData, "PODVM_TAG").Exists() {
ppParam.LIBVIRT_PODVM_TAG = gjson.Get(configmapData, "PODVM_TAG").String()
}
if gjson.Get(configmapData, "SE_BOOT").Exists() {
ppParam.LIBVIRT_SE_BOOT = gjson.Get(configmapData, "SE_BOOT").String()
}
if gjson.Get(configmapData, "PODVM_IMAGE_URI").Exists() {
ppParam.LIBVIRT_PODVM_IMAGE_URI = gjson.Get(configmapData, "PODVM_IMAGE_URI").String()
}
return ppParam, nil
} | kata | |||
function | openshift/openshift-tests-private | 1dfd1cbf-fcd8-4581-8368-60d2a4ab61d6 | parseAWSCIConfigMapData | ['"github.com/tidwall/gjson"'] | ['PeerpodParam'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func parseAWSCIConfigMapData(configmapData string) (PeerpodParam, error) {
var ppParam PeerpodParam
if gjson.Get(configmapData, "AWS_REGION").Exists() {
ppParam.AWS_REGION = gjson.Get(configmapData, "AWS_REGION").String()
}
if gjson.Get(configmapData, "AWS_SUBNET_ID").Exists() {
ppParam.AWS_SUBNET_ID = gjson.Get(configmapData, "AWS_SUBNET_ID").String()
}
if gjson.Get(configmapData, "AWS_VPC_ID").Exists() {
ppParam.AWS_VPC_ID = gjson.Get(configmapData, "AWS_VPC_ID").String()
}
if gjson.Get(configmapData, "AWS_SG_IDS").Exists() {
ppParam.AWS_SG_IDS = gjson.Get(configmapData, "AWS_SG_IDS").String()
}
if gjson.Get(configmapData, "VXLAN_PORT").Exists() {
ppParam.VXLAN_PORT = gjson.Get(configmapData, "VXLAN_PORT").String()
}
if gjson.Get(configmapData, "PODVM_INSTANCE_TYPE").Exists() {
ppParam.PODVM_INSTANCE_TYPE = gjson.Get(configmapData, "PODVM_INSTANCE_TYPE").String()
}
if gjson.Get(configmapData, "PROXY_TIMEOUT").Exists() {
ppParam.PROXY_TIMEOUT = gjson.Get(configmapData, "PROXY_TIMEOUT").String()
}
return ppParam, nil
} | kata | |||
function | openshift/openshift-tests-private | 28c9c01e-0fc6-4214-8c4f-4c9fb82532d7 | parseAzureCIConfigMapData | ['"github.com/tidwall/gjson"'] | ['PeerpodParam'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func parseAzureCIConfigMapData(configmapData string) (PeerpodParam, error) {
var ppParam PeerpodParam
if gjson.Get(configmapData, "AZURE_REGION").Exists() {
ppParam.AZURE_REGION = gjson.Get(configmapData, "AZURE_REGION").String()
}
if gjson.Get(configmapData, "AZURE_RESOURCE_GROUP").Exists() {
ppParam.AZURE_RESOURCE_GROUP = gjson.Get(configmapData, "AZURE_RESOURCE_GROUP").String()
}
if gjson.Get(configmapData, "VXLAN_PORT").Exists() {
ppParam.VXLAN_PORT = gjson.Get(configmapData, "VXLAN_PORT").String()
}
if gjson.Get(configmapData, "AZURE_INSTANCE_SIZE").Exists() {
ppParam.AZURE_INSTANCE_SIZE = gjson.Get(configmapData, "AZURE_INSTANCE_SIZE").String()
}
if gjson.Get(configmapData, "AZURE_SUBNET_ID").Exists() {
ppParam.AZURE_SUBNET_ID = gjson.Get(configmapData, "AZURE_SUBNET_ID").String()
}
if gjson.Get(configmapData, "AZURE_NSG_ID").Exists() {
ppParam.AZURE_NSG_ID = gjson.Get(configmapData, "AZURE_NSG_ID").String()
}
if gjson.Get(configmapData, "PROXY_TIMEOUT").Exists() {
ppParam.PROXY_TIMEOUT = gjson.Get(configmapData, "PROXY_TIMEOUT").String()
}
return ppParam, nil
} | kata | |||
// createLibvirtPeerPodSecrets renders the "peer-pods-secret" manifest for the
// libvirt provider from the CI-staged secret (ciSecretName in the "default"
// namespace), writes it to the secretTemplate path and returns that path.
// Each credential in the CI secret's .data is individually base64-decoded.
// Returns ("", err) on lookup/decode/write failure; note the "credentials not
// found" case returns a human-readable message in the first return value.
func createLibvirtPeerPodSecrets(oc *exutil.CLI, ppParam PeerpodParam, ciSecretName, secretTemplate string) (string, error) {
	var (
		secretString string
	)
	secretString, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", ciSecretName, "-n", "default", "-o=jsonpath={.data}").Output()
	if err != nil || secretString == "" {
		e2e.Logf("Error: %v CI provided peer pods secret data empty", err)
		return "", err
	}
	var (
		LIBVIRT_URI          string
		LIBVIRT_POOL         string
		LIBVIRT_VOL_NAME     string
		ACTIVATION_KEY       string
		REDHAT_OFFLINE_TOKEN string
		HOST_KEY_CERTS       string
	)
	// Map of CI secret keys to the local variables that receive the
	// base64-decoded values.
	fields := map[string]*string{
		"LIBVIRT_URI":          &LIBVIRT_URI,
		"LIBVIRT_POOL":         &LIBVIRT_POOL,
		"LIBVIRT_VOL_NAME":     &LIBVIRT_VOL_NAME,
		"ACTIVATION_KEY":       &ACTIVATION_KEY,
		"REDHAT_OFFLINE_TOKEN": &REDHAT_OFFLINE_TOKEN,
		"HOST_KEY_CERTS":       &HOST_KEY_CERTS,
	}
	for key, valuePtr := range fields {
		encodedValue := gjson.Get(secretString, key).String()
		if encodedValue == "" {
			// Empty fields are tolerated here; required ones are checked below.
			e2e.Logf("Warning: %v field is empty", key)
			continue
		}
		decodedValue, err := decodeSecret(encodedValue)
		if err != nil {
			e2e.Logf("Error decoding %v: %v", key, err)
			return "", err
		}
		*valuePtr = decodedValue
	}
	// Check for libvirt credentials (HOST_KEY_CERTS is optional).
	if LIBVIRT_POOL == "" || LIBVIRT_URI == "" || LIBVIRT_VOL_NAME == "" || REDHAT_OFFLINE_TOKEN == "" || ACTIVATION_KEY == "" {
		msg := "Libvirt credentials not found in the data."
		return msg, fmt.Errorf("Libvirt credentials not found")
	}
	// Construct the secretJSON for Libvirt
	secretJSON := map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "Secret",
		"type":       "Opaque",
		"metadata": map[string]string{
			"name":      "peer-pods-secret",
			"namespace": "openshift-sandboxed-containers-operator",
		},
		"stringData": map[string]string{
			"CLOUD_PROVIDER":       "libvirt",
			"LIBVIRT_URI":          LIBVIRT_URI,
			"LIBVIRT_POOL":         LIBVIRT_POOL,
			"LIBVIRT_VOL_NAME":     LIBVIRT_VOL_NAME,
			"REDHAT_OFFLINE_TOKEN": REDHAT_OFFLINE_TOKEN,
			"ACTIVATION_KEY":       ACTIVATION_KEY,
			"HOST_KEY_CERTS":       HOST_KEY_CERTS,
		},
	}
	// Marshal the JSON to a string
	secretJSONString, err := json.Marshal(secretJSON)
	if err != nil {
		return "", err
	}
	// Write the JSON string to the secretTemplate file
	err = os.WriteFile(secretTemplate, []byte(secretJSONString), 0644)
	if err != nil {
		return "", err
	}
	return secretTemplate, nil
}
// createAWSPeerPodSecrets renders the "peer-pods-secret" manifest for AWS from
// the CI-staged secret (ciSecretName in the "default" namespace), writes it to
// the secretTemplate path and returns that path. The CI secret's .data.aws
// field is a base64-encoded AWS credentials file with
// "aws_access_key_id=..." / "aws_secret_access_key=..." lines.
// Note the "credentials not found" case returns a human-readable message in
// the first return value rather than a file path.
func createAWSPeerPodSecrets(oc *exutil.CLI, ppParam PeerpodParam, ciSecretName, secretTemplate string) (string, error) {
	var (
		secretString  string
		decodedString string
		lines         []string
	)
	// Read peerpods-param-secret to fetch the keys
	secretString, err := oc.AsAdmin().Run("get").Args("secret", ciSecretName, "-n", "default", "-o=jsonpath={.data.aws}").Output()
	if err != nil || secretString == "" {
		e2e.Logf("Error: %v CI provided peer pods secret data empty", err)
		return "", err
	}
	decodedString, err = decodeSecret(secretString)
	if err != nil {
		return "", err
	}
	// Parse the decoded credentials file line by line as "key=value" pairs;
	// lines without exactly one '=' are ignored.
	lines = strings.Split(decodedString, "\n")
	accessKey := ""
	secretKey := ""
	for _, line := range lines {
		parts := strings.Split(line, "=")
		if len(parts) == 2 {
			key := strings.TrimSpace(parts[0])
			value := strings.TrimSpace(parts[1])
			if key == "aws_access_key_id" {
				accessKey = value
			} else if key == "aws_secret_access_key" {
				secretKey = value
			}
		}
	}
	// Check for AWS credentials
	if accessKey == "" || secretKey == "" {
		msg := "AWS credentials not found in the data."
		return msg, fmt.Errorf("AWS credentials not found")
	}
	// create AWS specific secret file logic here
	// Construct the secretJSON for AWS
	secretJSON := map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "Secret",
		"type":       "Opaque",
		"metadata": map[string]string{
			"name":      "peer-pods-secret",
			"namespace": "openshift-sandboxed-containers-operator",
		},
		"stringData": map[string]string{
			"AWS_ACCESS_KEY_ID":     accessKey,
			"AWS_SECRET_ACCESS_KEY": secretKey,
		},
	}
	// Marshal the JSON to a string
	secretJSONString, err := json.Marshal(secretJSON)
	if err != nil {
		return "", err
	}
	// Write the JSON string to the secretTemplate file
	err = os.WriteFile(secretTemplate, []byte(secretJSONString), 0644)
	if err != nil {
		return "", err
	}
	return secretTemplate, nil
}
// createAzurePeerPodSecrets renders the "peer-pods-secret" manifest for Azure
// from the CI-staged secret (ciSecretName in the "default" namespace), writes
// it to the secretTemplate path and returns that path. The CI secret's
// .data.azure field is base64-encoded JSON holding subscriptionId, clientId,
// clientSecret and tenantId. Note the "credentials not found" case returns a
// human-readable message in the first return value rather than a file path.
func createAzurePeerPodSecrets(oc *exutil.CLI, ppParam PeerpodParam, ciSecretName, secretTemplate string) (string, error) {
	var (
		secretString  string
		decodedString string
	)
	// Read peerpods-param-secret to fetch the keys
	secretString, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", ciSecretName, "-n", "default", "-o=jsonpath={.data.azure}").Output()
	if err != nil || secretString == "" {
		e2e.Logf("Error: %v CI provided peer pods secret data empty", err)
		return "", err
	}
	decodedString, err = decodeSecret(secretString)
	if err != nil {
		e2e.Logf("Error: %v CI provided peer pods secret data can't be decoded", err)
		return "", err
	}
	// check for all the keys and empty values: every one of the four Azure
	// credentials must be both present and non-empty.
	if !(gjson.Get(decodedString, "subscriptionId").Exists() && gjson.Get(decodedString, "clientId").Exists() &&
		gjson.Get(decodedString, "clientSecret").Exists() && gjson.Get(decodedString, "tenantId").Exists()) ||
		gjson.Get(decodedString, "subscriptionId").String() == "" || gjson.Get(decodedString, "clientId").String() == "" ||
		gjson.Get(decodedString, "clientSecret").String() == "" || gjson.Get(decodedString, "tenantId").String() == "" {
		msg := "Azure credentials not found or partial in the data."
		return msg, fmt.Errorf("Azure credentials not found")
	}
	// create Azure specific secret file logic here
	// Construct the secretJSON for Azure
	secretJSON := map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "Secret",
		"type":       "Opaque",
		"metadata": map[string]string{
			"name":      "peer-pods-secret",
			"namespace": "openshift-sandboxed-containers-operator",
		},
		"stringData": map[string]string{
			"AZURE_CLIENT_ID":       gjson.Get(decodedString, "clientId").String(),
			"AZURE_CLIENT_SECRET":   gjson.Get(decodedString, "clientSecret").String(),
			"AZURE_TENANT_ID":       gjson.Get(decodedString, "tenantId").String(),
			"AZURE_SUBSCRIPTION_ID": gjson.Get(decodedString, "subscriptionId").String(),
		},
	}
	// Marshal the JSON to a string
	secretJSONString, err := json.Marshal(secretJSON)
	if err != nil {
		return "", err
	}
	// Write the JSON string to the secretTemplate file
	err = os.WriteFile(secretTemplate, []byte(secretJSONString), 0644)
	if err != nil {
		return "", err
	}
	return secretTemplate, nil
}
function | openshift/openshift-tests-private | 2f6612ab-71b5-4294-93d8-b0c970918ba3 | getCloudProvider | ['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func getCloudProvider(oc *exutil.CLI) string {
var (
errMsg error
output string
cloudprovider string
)
err := wait.PollImmediate(5*time.Second, 30*time.Second, func() (bool, error) {
output, errMsg = oc.WithoutNamespace().AsAdmin().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.type}").Output()
if errMsg != nil {
e2e.Logf("Get cloudProvider *failed with* :\"%v\",wait 5 seconds retry.", errMsg)
return false, errMsg
}
cloudprovider = strings.ToLower(output)
if cloudprovider == "none" {
cloudprovider = "libvirt"
}
e2e.Logf("The test cluster cloudProvider is :\"%s\".", strings.ToLower(cloudprovider))
return true, nil
})
exutil.AssertWaitPollNoErr(err, "Waiting for get cloudProvider timeout")
return strings.ToLower(cloudprovider)
} | kata | ||||
function | openshift/openshift-tests-private | fdeb5467-9e9d-4902-ba47-0696f79fd7d5 | createRWOfilePVC | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func createRWOfilePVC(oc *exutil.CLI, opNamespace, pvcName, capacity string) (err error) {
// author: [email protected]
// creates a PVC using as much calculated and default paramers as possible, leaving only:
// napespace
// Capacity in Gigs
// Name
// returns err
accessMode := "ReadWriteOnce" //ReadWriteOnce, ReadOnlyMany or ReadWriteMany
volumeMode := "Filesystem" //Filesystem, Block
return createPVC(oc, opNamespace, pvcName, capacity, volumeMode, accessMode)
} | kata | |||||
// createPVC processes the storage pvc-template with the given parameters and
// applies the result. The storage class name is looked up per cloud platform
// and volumeMode from the jsonCsiClass table below; capacity must be an
// integer string (number of Gigs). Fails the test if processing or applying
// the template fails.
func createPVC(oc *exutil.CLI, opNamespace, pvcName, capacity, volumeMode, accessMode string) (err error) {
	// just single Storage class per platform, block will be supported later?
	// NOTE(review): platforms outside this table (e.g. libvirt) yield an
	// empty storage class name from the gjson lookup — confirm callers only
	// run this on azure/gcp/aws.
	const jsonCsiClass = `{"azure":{"Filesystem":"azurefile-csi","Block":"managed-csi"},
	"gcp":{"Filesystem":"standard-csi","Block":"standard-csi"},
	"aws":{"Filesystem":"gp3-csi","Block":"gp3-csi"}}`
	cloudPlatform := getCloudProvider(oc)
	scName := gjson.Get(jsonCsiClass, strings.Join([]string{cloudPlatform, volumeMode}, `.`)).String()
	pvcDataDir := exutil.FixturePath("testdata", "storage")
	pvcTemplate := filepath.Join(pvcDataDir, "pvc-template.yaml")
	//validate provided capacity is a valid integer
	_, err = strconv.Atoi(capacity)
	if err != nil {
		return err
	}
	g.By("Create pvc from template")
	pvcFile, err := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", pvcTemplate,
		"-p", "SCNAME="+scName, "-p", "PVCNAME="+pvcName, "-p", "PVCNAMESPACE="+opNamespace,
		"-p", "ACCESSMODE="+accessMode, "-p", "VOLUMEMODE="+volumeMode, "-p", "PVCCAPACITY="+capacity).OutputToFile(getRandomString() + "pvc-default.json")
	if err != nil {
		e2e.Logf("Could not create pvc %v %v", pvcFile, err)
	}
	o.Expect(err).NotTo(o.HaveOccurred())
	g.By("Applying pvc " + pvcFile)
	msg, err := oc.AsAdmin().Run("apply").Args("-f", pvcFile, "-n", opNamespace).Output()
	if err != nil {
		e2e.Logf("Could not apply pvc %v %v", msg, err)
	}
	o.Expect(err).NotTo(o.HaveOccurred())
	e2e.Logf("pvc apply output: %v", msg)
	return err
}
/*
createApplyPeerPodConfigMap reads the configmap that the CI had applied
("peerpods-param-cm" in the "default" namespace), creates "peer-pods-cm"
from it and then applies it on the cluster.
Checks if the cluster already has a peer-pods-cm and also for the correct
value of the cloud provider. When the configmap already exists it also
asserts that no pod VM image ID has been set yet (it must only appear
after the kataconfig install).
*/
func createApplyPeerPodConfigMap(oc *exutil.CLI, provider string, ppParam PeerpodParam, opNamespace, ppConfigMapName, ppConfigMapTemplate string) (msg string, err error) {
	var (
		ciCmName   = "peerpods-param-cm" // configmap staged by QE CI
		configFile string
		imageID    string
	)
	g.By("Checking if peer-pods-cm exists")
	_, err = checkPeerPodConfigMap(oc, opNamespace, provider, ppConfigMapName)
	if err == nil {
		//check for IMAGE ID in the configmap
		msg, err, imageID = CheckPodVMImageID(oc, ppConfigMapName, provider, opNamespace)
		o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("%v imageID: %v err: %v", msg, imageID, err))
		if imageID == "" {
			e2e.Logf("peer-pods-cm in the right state - does not have the IMAGE ID before the kataconfig install , msg: %v", msg)
		} else {
			e2e.Logf("IMAGE ID: %v", imageID)
			msgIfErr := fmt.Sprintf("ERROR: peer-pods-cm has the Image ID before the kataconfig is installed, incorrect state: %v %v %v", imageID, msg, err)
			o.Expect(imageID).NotTo(o.BeEmpty(), msgIfErr)
		}
		e2e.Logf("peer-pods-cm exists - skipping creating it")
		return msg, err
	} else if err != nil {
		e2e.Logf("**** peer-pods-cm not found on the cluster - proceeding to create it****")
	}
	//Read params from peerpods-param-cm and store in ppParam struct
	msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", ciCmName, "-n", "default").Output()
	if err != nil {
		e2e.Logf("%v Configmap created by QE CI not found: msg %v err: %v", ciCmName, msg, err)
	} else {
		configmapData, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", ciCmName, "-n", "default", "-o=jsonpath={.data}").Output()
		if err != nil {
			e2e.Failf("%v Configmap created by QE CI has error, no .data: %v %v", ciCmName, configmapData, err)
		}
		e2e.Logf("configmap Data is:\n%v", configmapData)
		// NOTE: shadows the ppParam parameter (and err) for the rest of the block.
		ppParam, err := parseCIPpConfigMapData(provider, configmapData)
		if err != nil {
			return msg, err
		}
		// Create peer-pods-cm file
		if provider == "aws" {
			configFile, err = createAWSPeerPodsConfigMap(oc, ppParam, ppConfigMapTemplate)
		} else if provider == "azure" {
			configFile, err = createAzurePeerPodsConfigMap(oc, ppParam, ppConfigMapTemplate)
		} else if provider == "libvirt" {
			configFile, err = createLibvirtPeerPodsConfigMap(oc, ppParam, ppConfigMapTemplate)
		} else {
			msg = fmt.Sprintf("Cloud provider %v is not supported", provider)
			return msg, fmt.Errorf("%v", msg)
		}
		if err != nil {
			return msg, err
		}
		// Apply peer-pods-cm file
		g.By("(Apply peer-pods-cm file)")
		msg, err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", configFile).Output()
		if err != nil {
			return fmt.Sprintf("Error: applying peer-pods-cm %v failed: %v %v", configFile, msg, err), err
		}
	}
	return msg, err
}
function | openshift/openshift-tests-private | 4eb3c432-8eef-4482-9eba-5b352fc7371d | createAWSPeerPodsConfigMap | ['"encoding/json"', '"os"'] | ['PeerpodParam'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func createAWSPeerPodsConfigMap(oc *exutil.CLI, ppParam PeerpodParam, ppConfigMapTemplate string) (string, error) {
g.By("Create peer-pods-cm file")
// Processing configmap template and create " <randomstring>peer-pods-cm.json"
configFile, err := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", ppConfigMapTemplate,
"-p", "VXLAN_PORT="+ppParam.VXLAN_PORT, "PODVM_INSTANCE_TYPE="+ppParam.PODVM_INSTANCE_TYPE,
"PROXY_TIMEOUT="+ppParam.PROXY_TIMEOUT, "AWS_REGION="+ppParam.AWS_REGION,
"AWS_SUBNET_ID="+ppParam.AWS_SUBNET_ID, "AWS_VPC_ID="+ppParam.AWS_VPC_ID,
"AWS_SG_IDS="+ppParam.AWS_SG_IDS).OutputToFile(getRandomString() + "peer-pods-cm.json")
if configFile != "" {
osStatMsg, configFileExists := os.Stat(configFile)
if configFileExists != nil {
e2e.Logf("issue creating peer-pods-cm file %s, err: %v , osStatMsg: %v", configFile, err, osStatMsg)
}
}
return configFile, err
} | kata | |||
function | openshift/openshift-tests-private | dda744aa-1833-4770-a0ee-801c42724085 | createAzurePeerPodsConfigMap | ['"encoding/json"', '"os"'] | ['PeerpodParam'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func createAzurePeerPodsConfigMap(oc *exutil.CLI, ppParam PeerpodParam, ppConfigMapTemplate string) (string, error) {
g.By("Create peer-pods-cm file")
// Processing configmap template and create " <randomstring>peer-pods-cm.json"
configFile, err := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", ppConfigMapTemplate,
"-p", "VXLAN_PORT="+ppParam.VXLAN_PORT, "AZURE_INSTANCE_SIZE="+ppParam.AZURE_INSTANCE_SIZE,
"AZURE_SUBNET_ID="+ppParam.AZURE_SUBNET_ID, "AZURE_NSG_ID="+ppParam.AZURE_NSG_ID,
"PROXY_TIMEOUT="+ppParam.PROXY_TIMEOUT, "AZURE_REGION="+ppParam.AZURE_REGION,
"AZURE_RESOURCE_GROUP="+ppParam.AZURE_RESOURCE_GROUP).OutputToFile(getRandomString() + "peer-pods-cm.json")
if configFile != "" {
osStatMsg, configFileExists := os.Stat(configFile)
if configFileExists != nil {
e2e.Logf("issue creating peer-pods-cm file %s, err: %v , osStatMsg: %v", configFile, err, osStatMsg)
}
}
return configFile, err
} | kata | |||
function | openshift/openshift-tests-private | 2b24d6fb-972c-4717-b527-e3e9de8567f9 | createLibvirtPeerPodsConfigMap | ['"encoding/json"', '"os"'] | ['PeerpodParam'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func createLibvirtPeerPodsConfigMap(oc *exutil.CLI, ppParam PeerpodParam, ppConfigMapTemplate string) (string, error) {
g.By("Create peer-pods-cm file")
// Processing configmap template and create " <randomstring>peer-pods-cm.json"
configFile, err := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", ppConfigMapTemplate,
"-p", "PROXY_TIMEOUT="+ppParam.PROXY_TIMEOUT).OutputToFile(getRandomString() + "peer-pods-cm.json")
if configFile != "" {
osStatMsg, configFileExists := os.Stat(configFile)
if configFileExists != nil {
e2e.Logf("issue creating peer-pods-cm file %s, err: %v , osStatMsg: %v", configFile, err, osStatMsg)
}
}
return configFile, err
} | kata | |||
function | openshift/openshift-tests-private | de9d9e11-dfdd-4e78-89fe-c356bac6b6d3 | createLibvirtPeerPodsParamConfigMap | ['"encoding/json"', '"os"'] | ['PeerpodParam'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func createLibvirtPeerPodsParamConfigMap(oc *exutil.CLI, ppParam PeerpodParam, ppConfigMapTemplate string) (string, error) {
g.By("Create libvirt-podvm-image-cm file")
// Processing configmap template and create " <randomstring>peer-pods-cm.json"
configFile, err := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f", ppConfigMapTemplate,
"-p", "PODVM_DISTRO="+ppParam.LIBVIRT_PODVM_DISTRO, "CAA_SRC="+ppParam.LIBVIRT_CAA_SRC, "CAA_REF="+ppParam.LIBVIRT_CAA_REF, "DOWNLOAD_SOURCES="+ppParam.LIBVIRT_DOWNLOAD_SOURCES, "CONFIDENTIAL_COMPUTE_ENABLED="+ppParam.LIBVIRT_CONFIDENTIAL_COMPUTE_ENABLED, "UPDATE_PEERPODS_CM="+ppParam.LIBVIRT_UPDATE_PEERPODS_CM, "ORG_ID="+ppParam.LIBVIRT_ORG_ID, "BASE_OS_VERSION="+ppParam.LIBVIRT_BASE_OS_VERSION, "IMAGE_NAME="+ppParam.LIBVIRT_IMAGE_NAME, "PODVM_TAG="+ppParam.LIBVIRT_PODVM_TAG, "SE_BOOT="+ppParam.LIBVIRT_SE_BOOT, "PODVM_IMAGE_URI="+ppParam.LIBVIRT_PODVM_IMAGE_URI).OutputToFile(getRandomString() + "peerpods-param-cm.json")
if configFile != "" {
osStatMsg, configFileExists := os.Stat(configFile)
if configFileExists != nil {
e2e.Logf("issue creating libvirt-podvm-image-cm file %s, err: %v , osStatMsg: %v", configFile, err, osStatMsg)
}
}
return configFile, err
} | kata | |||
// createSSHPeerPodsKeys generates a throwaway ssh keypair and stores it in the
// "ssh-key-secret" secret in the operator namespace. For libvirt the private
// key is included and the public key is copied to the KVM host (address and
// password come from the CI-staged configmap/secret in "default"); for other
// providers only the public key is stored. The local key files are shredded
// on return. Treats an already-existing secret as success.
func createSSHPeerPodsKeys(oc *exutil.CLI, ppParam PeerpodParam, provider string) error {
	g.By("Create ssh keys")
	keyName := "id_rsa_" + getRandomString()
	pubKeyName := keyName + ".pub"
	fromFile := []string{"--from-file=id_rsa.pub=./" + pubKeyName}
	// Shred both key files when done, whatever happens below.
	shredRMCmd := fmt.Sprintf(`shred -f --remove ./%v ./%v`, keyName, pubKeyName)
	defer exec.Command("bash", "-c", shredRMCmd).CombinedOutput()
	sshKeyGenCmd := fmt.Sprintf(`ssh-keygen -f ./%v -N ""`, keyName)
	retCmd, err := exec.Command("bash", "-c", sshKeyGenCmd).CombinedOutput()
	if err != nil {
		e2e.Logf("the error: %v", string(retCmd))
		return err
	}
	if provider == "libvirt" {
		var (
			ciCmName     = "peerpods-param-cm"     // CI-staged configmap with the KVM host address
			ciSecretName = "peerpods-param-secret" // CI-staged secret with the KVM host password
		)
		fromFile = append(fromFile, "--from-file=id_rsa=./"+keyName)
		configmapData, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", ciCmName, "-n", "default", "-o=jsonpath={.data}").Output()
		if err != nil {
			e2e.Failf("%v Configmap created by QE CI has error, no .data: %v %v", ciCmName, configmapData, err)
		}
		ppParam, err = parseCIPpConfigMapData(provider, configmapData)
		if err != nil {
			e2e.Failf("Error getting ppParam %v", err)
		}
		secretData, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", ciSecretName, "-n", "default", "-o=jsonpath={.data}").Output()
		if err != nil {
			e2e.Failf("%v Secret created by QE CI has error %v", ciSecretName, err)
		}
		hostpassword, err := decodeSecret(gjson.Get(secretData, "HOST_PASSWORD").String())
		if err != nil {
			e2e.Logf("Error: %v CI provided peer pods secret data can't be decoded", err)
			return err
		}
		// NOTE(review): sshpass passes the host password on the command line,
		// which is visible in process listings on the test runner — confirm
		// this is acceptable for the CI environment.
		sshCopyIdCmd := fmt.Sprintf(`sshpass -p %v ssh-copy-id -i ./%v %v`, hostpassword, pubKeyName, ppParam.LIBVIRT_KVM_HOST_ADDRESS)
		retCmd, err = exec.Command("bash", "-c", sshCopyIdCmd).CombinedOutput()
		if err != nil {
			e2e.Logf("the error: %v", string(retCmd))
			return err
		}
	}
	sshSecretCmd := append([]string{"-n", "openshift-sandboxed-containers-operator", "secret", "generic", "ssh-key-secret"}, fromFile...)
	secretMsg, err := oc.AsAdmin().WithoutNamespace().Run("create").Args(sshSecretCmd...).Output()
	// An already-existing secret is not treated as a failure.
	if strings.Contains(secretMsg, "already exists") {
		e2e.Logf(`ssh-key-secret created and it already exists`)
		return nil
	}
	return err
}
function | openshift/openshift-tests-private | e804f36a-cd2e-4b30-a3a0-1247990294cc | checkLabeledPodsExpectedRunning | ['"fmt"', '"strconv"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func checkLabeledPodsExpectedRunning(oc *exutil.CLI, resNs, label, expectedRunning string) (msg string, err error) {
// the inputs are strings to be consistant with other check....() functions. This is also what the oc command returns
var (
resType = "pod"
jsonpath = "-o=jsonpath={.status.conditions[?(@.type=='Ready')].status}"
podList []string
podName string
number int
failMsg []string
)
podList, err = exutil.GetAllPodsWithLabel(oc, resNs, label)
if err != nil || len(podList) == 0 {
e2e.Failf("Could not get pod names with %v label: %v %v", label, podList, err)
}
number, err = strconv.Atoi(expectedRunning)
if number != len(podList) || err != nil {
e2e.Failf("ERROR: Number of pods %v does not match %v expected pods: %v %v", number, expectedRunning, msg, err)
}
for _, podName = range podList {
msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(resType, podName, "-n", resNs, jsonpath).Output()
if err != nil || strings.ToLower(msg) != "true" {
failMsg = append(failMsg, fmt.Sprintf("ERROR: %v is not ready: %v %v", podName, msg, err))
}
}
if len(failMsg) != 0 {
e2e.Failf("%v pods are not ready: %v", len(failMsg), failMsg)
}
err = nil
msg = fmt.Sprintf("All %v pods ready %v)", expectedRunning, podList)
return msg, err
} | kata | ||||
// checkResourceJsonpathMatch waits for the resource to exist and then asserts
// that the two jsonpath expressions evaluate to the same (non-empty) value on
// it. Fails the test on any lookup error, empty value or mismatch; on success
// it returns the matched value and a descriptive message.
func checkResourceJsonpathMatch(oc *exutil.CLI, resType, resName, resNs, jsonPath1, jsonPath2 string) (expectedMatch, msg string, err error) {
	// the inputs are strings to be consistant with other check....() functions. This is also what the oc command returns
	var (
		duration time.Duration = 300
		interval time.Duration = 10
	)
	_, _ = checkResourceExists(oc, resType, resName, resNs, duration, interval)
	expectedMatch, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(resType, resName, "-n", resNs, jsonPath1).Output()
	if err != nil || expectedMatch == "" {
		e2e.Failf("ERROR: could not get %v from %v %v: %v %v", jsonPath1, resType, resName, expectedMatch, err)
	}
	msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(resType, resName, "-n", resNs, jsonPath2).Output()
	if err != nil || msg == "" {
		e2e.Failf("ERROR: could not get %v from %v %v: %v %v", jsonPath2, resType, resName, msg, err)
	}
	if expectedMatch != msg {
		e2e.Failf("ERROR: %v (%v) does not match %v (%v)", jsonPath1, expectedMatch, jsonPath2, msg)
	}
	err = nil
	// msg is read on the right-hand side before being replaced by the summary.
	msg = fmt.Sprintf("%v (%v) == %v (%v)", jsonPath1, expectedMatch, jsonPath2, msg)
	return expectedMatch, msg, err
}
function | openshift/openshift-tests-private | 1d5e7f82-ac58-45e1-9e46-d7ca30234a30 | clusterHasEnabledFIPS | ['"fmt"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func clusterHasEnabledFIPS(oc *exutil.CLI, subscriptionNamespace string) bool {
firstNode, err := exutil.GetFirstMasterNode(oc)
msgIfErr := fmt.Sprintf("ERROR Could not get first node to check FIPS '%v' %v", firstNode, err)
o.Expect(err).NotTo(o.HaveOccurred(), msgIfErr)
o.Expect(firstNode).NotTo(o.BeEmpty(), msgIfErr)
fipsModeStatus, err := oc.AsAdmin().Run("debug").Args("-n", subscriptionNamespace, "node/"+firstNode, "--", "chroot", "/host", "fips-mode-setup", "--check").Output()
msgIfErr = fmt.Sprintf("ERROR Could not check FIPS on node %v: '%v' %v", firstNode, fipsModeStatus, err)
o.Expect(err).NotTo(o.HaveOccurred(), msgIfErr)
o.Expect(fipsModeStatus).NotTo(o.BeEmpty(), msgIfErr)
// This will be true or false
return strings.Contains(fipsModeStatus, "FIPS mode is enabled.")
} | kata | ||||
function | openshift/openshift-tests-private | 8f16ed2f-1e3c-4429-bf66-50164f8760e0 | patchPeerPodLimit | ['"fmt"', '"io"', '"strings"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func patchPeerPodLimit(oc *exutil.CLI, opNamespace, newLimit string) {
patchLimit := "{\"spec\":{\"limit\":\"" + newLimit + "\"}}"
msg, err := oc.AsAdmin().Run("patch").Args("peerpodconfig", "peerpodconfig-openshift", "-n",
opNamespace, "--type", "merge", "--patch", patchLimit).Output()
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Could not patch podvm limit to %v\n error: %v %v", newLimit, msg, err))
currentLimit := getPeerPodLimit(oc, opNamespace)
o.Expect(currentLimit).To(o.Equal(newLimit))
//check node untill the new value is propagated
jsonpath := "-o=jsonpath='{.status.allocatable.kata\\.peerpods\\.io/vm}'"
nodeName, _ := exutil.GetFirstWorkerNode(oc)
nodeLimit, _ := checkResourceJsonpath(oc, "node", nodeName, opNamespace, jsonpath, newLimit, 30*time.Second, 5*time.Second)
e2e.Logf("node podvm limit is %v", nodeLimit)
o.Expect(strings.Trim(nodeLimit, "'")).To(o.Equal(newLimit))
} | kata | ||||
function | openshift/openshift-tests-private | b109ad8a-33b0-4b42-be93-77337c2312e5 | getPeerPodLimit | ['"fmt"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func getPeerPodLimit(oc *exutil.CLI, opNamespace string) (podLimit string) {
jsonpathLimit := "-o=jsonpath={.spec.limit}"
podLimit, err := oc.AsAdmin().Run("get").Args("peerpodconfig", "peerpodconfig-openshift", "-n", opNamespace, jsonpathLimit).Output()
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Could not find %v in %v\n Error: %v", jsonpathLimit, "peerpodconfig-openshift", err))
e2e.Logf("peerpodconfig podvm limit is %v", podLimit)
return podLimit
} | kata | ||||
function | openshift/openshift-tests-private | 1f2431be-fb00-45cf-9c2c-a7729810eb45 | getPeerPodMetadataInstanceType | ['"net/http"', '"os/exec"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func getPeerPodMetadataInstanceType(oc *exutil.CLI, opNamespace, podName, provider string) (string, error) {
metadataCurl := map[string][]string{
"aws": {"http://169.254.169.254/latest/meta-data/instance-type"},
"azure": {"-H", "Metadata:true", "\\*", "http://169.254.169.254/metadata/instance/compute/vmSize?api-version=2023-07-01&format=text"},
}
podCmd := []string{"-n", opNamespace, podName, "--", "curl", "-s"}
msg, err := oc.WithoutNamespace().AsAdmin().Run("exec").Args(append(podCmd, metadataCurl[provider]...)...).Output()
return msg, err
} | kata | ||||
function | openshift/openshift-tests-private | 302a6e8c-a236-4d78-ab3e-b2850a926f4f | getPeerPodMetadataTags | ['"net/http"', '"os/exec"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func getPeerPodMetadataTags(oc *exutil.CLI, opNamespace, podName, provider string) (string, error) {
//AWS have to enable tags by 1st finding instance-id from metadata
//curl -s http://169.254.169.254/latest/meta-data/instance-id
//aws ec2 modify-instance-metadata-options --instance-id i-0a893c6458c272d12 --instance-metadata-tags enabled
//curl -s http://169.254.169.254/latest/meta-data/tags/instance/key1
//value1
metadataCurl := map[string][]string{
"aws": {"http://169.254.169.254/latest/meta-data/tags/instance/key1"},
"azure": {"-H", "Metadata:true", "\\*", "http://169.254.169.254/metadata/instance/compute/tags?api-version=2023-07-01&format=text"},
}
//azure pod tags format is different from the configmap="key1=value1,key2=value2":
// sh-4.4$ curl -s -H "Metadata:true" "\\*" "http://169.254.169.254/metadata/instance/compute/tags?api-version=2023-07-01&format=text"
// key1:value1;key2:value2sh-4.4$
podCmd := []string{"-n", opNamespace, podName, "--", "curl", "-s"}
msg, err := oc.WithoutNamespace().AsAdmin().Run("exec").Args(append(podCmd, metadataCurl[provider]...)...).Output()
return msg, err
} | kata | ||||
function | openshift/openshift-tests-private | e8d8036d-3a0d-46ac-b376-d343823ae557 | CheckPodVMImageID | ['"fmt"', '"github.com/tidwall/gjson"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func CheckPodVMImageID(oc *exutil.CLI, ppConfigMapName, provider, opNamespace string) (msg string, err error, imageID string) {
cloudProviderMap := map[string]string{
"aws": "PODVM_AMI_ID",
"azure": "AZURE_IMAGE_ID",
}
// Fetch the configmap details
msg, err = oc.AsAdmin().Run("get").Args("configmap", ppConfigMapName, "-n", opNamespace, "-o=jsonpath={.data}").Output()
if err != nil {
return "Error fetching configmap details", err, ""
}
imageIDParam := cloudProviderMap[provider]
if !gjson.Get(msg, imageIDParam).Exists() {
// Handle the case when imageIDParam is not found
e2e.Logf("Image ID parameter '%s' not found in the config map", imageIDParam)
return fmt.Sprintf("CM created does not have: %s", imageIDParam), nil, ""
}
imageID = gjson.Get(msg, imageIDParam).String()
if imageID == "" {
// Handle the case when imageIDParam is an empty string
e2e.Logf("Image ID parameter found in the config map but is an empty string; Image ID :%s", imageIDParam)
return fmt.Sprintf("CM created has an empty value for Image ID : %s", imageIDParam), nil, ""
}
return "CM does have the Image ID", nil, imageID
} | kata | ||||
function | openshift/openshift-tests-private | 0d85999e-1a51-4449-8a93-eccc1428aeb7 | getTestRunConfigmap | ['"encoding/json"', '"fmt"', '"io"', '"strings"', '"github.com/tidwall/gjson"'] | ['TestRunDescription'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func getTestRunConfigmap(oc *exutil.CLI, testrun *TestRunDescription, testrunConfigmapNs, testrunConfigmapName string) (configmapExists bool, err error) {
configmapExists = true
if testrun.checked { // its been checked
return configmapExists, nil
}
errorMessage := ""
// testrun.checked should == false
configmapJson, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", testrunConfigmapNs, testrunConfigmapName, "-o", "json").Output()
if err != nil {
e2e.Logf("Configmap is not found: %v %v", configmapJson, err)
testrun.checked = true // we checked, it doesn't exist
return false, nil
}
// testrun.checked should still == false
configmapData, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", testrunConfigmapNs, testrunConfigmapName, "-o", "jsonpath={.data}").Output()
if err != nil {
e2e.Logf("Configmap %v has error %v, no .data: %v %v", testrunConfigmapName, configmapJson, configmapData, err)
return configmapExists, err
}
e2e.Logf("configmap file %v found. Data is:\n%v", testrunConfigmapName, configmapData)
if gjson.Get(configmapData, "catalogsourcename").Exists() {
testrun.catalogSourceName = gjson.Get(configmapData, "catalogsourcename").String()
} else {
errorMessage = fmt.Sprintf("catalogsourcename is missing from data\n%v", errorMessage)
}
if gjson.Get(configmapData, "channel").Exists() {
testrun.channel = gjson.Get(configmapData, "channel").String()
} else {
errorMessage = fmt.Sprintf("channel is missing from data\n%v", errorMessage)
}
if gjson.Get(configmapData, "redirectNeeded").Exists() {
testrun.redirectNeeded = gjson.Get(configmapData, "redirectNeeded").Bool()
} else {
errorMessage = fmt.Sprintf("redirectNeeded is missing from data\n%v", errorMessage)
}
if gjson.Get(configmapData, "mustgatherimage").Exists() {
testrun.mustgatherImage = gjson.Get(configmapData, "mustgatherimage").String()
if strings.Contains(testrun.mustgatherImage, "brew.registry.redhat.io") {
testrun.redirectNeeded = true
}
} else {
errorMessage = fmt.Sprintf("mustgatherimage is missing from data\n%v", errorMessage)
}
if gjson.Get(configmapData, "eligibility").Exists() {
testrun.eligibility = gjson.Get(configmapData, "eligibility").Bool()
} else {
errorMessage = fmt.Sprintf("eligibility is missing from data\n%v", errorMessage)
}
if gjson.Get(configmapData, "eligibleSingleNode").Exists() {
testrun.eligibleSingleNode = gjson.Get(configmapData, "eligibleSingleNode").Bool()
} else {
errorMessage = fmt.Sprintf("eligibleSingleNode is missing from data\n%v", errorMessage)
}
if gjson.Get(configmapData, "labelSingleNode").Exists() {
testrun.labelSingleNode = gjson.Get(configmapData, "labelsinglenode").Bool()
} else {
errorMessage = fmt.Sprintf("labelSingleNode is missing from data\n%v", errorMessage)
}
if gjson.Get(configmapData, "operatorVer").Exists() {
testrun.operatorVer = gjson.Get(configmapData, "operatorVer").String()
} else {
errorMessage = fmt.Sprintf("operatorVer is missing from data\n%v", errorMessage)
}
if gjson.Get(configmapData, "runtimeClassName").Exists() {
testrun.runtimeClassName = gjson.Get(configmapData, "runtimeClassName").String()
} else {
errorMessage = fmt.Sprintf("runtimeClassName is missing from data\n%v", errorMessage)
}
if gjson.Get(configmapData, "enablePeerPods").Exists() {
testrun.enablePeerPods = gjson.Get(configmapData, "enablePeerPods").Bool()
} else {
errorMessage = fmt.Sprintf("enablePeerPods is missing from data\n%v", errorMessage)
}
if gjson.Get(configmapData, "enableGPU").Exists() {
testrun.enableGPU = gjson.Get(configmapData, "enableGPU").Bool()
} else {
errorMessage = fmt.Sprintf("enableGPU is missing from data\n%v", errorMessage)
}
if gjson.Get(configmapData, "podvmImageUrl").Exists() {
testrun.podvmImageUrl = gjson.Get(configmapData, "podvmImageUrl").String()
} else {
errorMessage = fmt.Sprintf("podvmImageUrl is missing from data\n%v", errorMessage)
}
if gjson.Get(configmapData, "workloadImage").Exists() {
testrun.workloadImage = gjson.Get(configmapData, "workloadImage").String()
} else {
errorMessage = fmt.Sprintf("workloadImage is missing from data\n%v", errorMessage)
}
if gjson.Get(configmapData, "installKataRPM").Exists() {
testrun.installKataRPM = gjson.Get(configmapData, "installKataRPM").Bool()
} else {
errorMessage = fmt.Sprintf("installKataRPM is missing from data\n%v", errorMessage)
}
if gjson.Get(configmapData, "workloadToTest").Exists() {
testrun.workloadToTest = gjson.Get(configmapData, "workloadToTest").String()
workloadAllowed := false
for _, v := range allowedWorkloadTypes {
if v == testrun.workloadToTest {
workloadAllowed = true
}
}
if !workloadAllowed {
errorMessage = fmt.Sprintf("workloadToTest (%v) is not one of the allowed workloads (%v)\n%v", testrun.workloadToTest, allowedWorkloadTypes, errorMessage)
}
} else {
errorMessage = fmt.Sprintf("workloadToTest is missing from data\n%v", errorMessage)
}
// only if testing coco workloads
// not required yet, so set defaults
if testrun.workloadToTest == "coco" {
trusteeErrorMessage := ""
if gjson.Get(configmapData, "trusteeCatalogSourcename").Exists() {
testrun.trusteeCatalogSourcename = gjson.Get(configmapData, "trusteeCatalogSourcename").String()
} else {
testrun.trusteeCatalogSourcename = "redhat-operators"
trusteeErrorMessage = fmt.Sprintf("workload is coco and trusteeCatalogSourcename is missing from data\n%v", trusteeErrorMessage)
}
if gjson.Get(configmapData, "trusteeUrl").Exists() {
// if blank, in-cluster trustee will be used
testrun.trusteeUrl = gjson.Get(configmapData, "trusteeUrl").String()
}
if trusteeErrorMessage != "" {
e2e.Logf("Some of the trustee data was not in osc-config. Using defaults in those cases:\n%v", trusteeErrorMessage)
}
}
if errorMessage != "" {
err = fmt.Errorf("%v", errorMessage)
// testrun.checked still == false. Setup is wrong & all tests will fail
} else {
testrun.checked = true // No errors, we checked
}
return configmapExists, err
} | kata | |||
function | openshift/openshift-tests-private | dc0dcce5-51c2-4196-ad69-056e23e51372 | getTestRunParameters | ['SubscriptionDescription', 'KataconfigDescription', 'TestRunDescription'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func getTestRunParameters(oc *exutil.CLI, subscription *SubscriptionDescription, kataconfig *KataconfigDescription, testrun *TestRunDescription, testrunConfigmapNs, testrunConfigmapName string) (configmapExists bool, err error) {
configmapExists = true
if testrun.checked { // already have everything & final values == Input values
return configmapExists, nil
}
configmapExists, err = getTestRunConfigmap(oc, testrun, testrunConfigmapNs, testrunConfigmapName)
if err != nil {
// testrun.checked should be false
return configmapExists, err
}
// no errors testrun.checked should be true
if configmapExists { // Then testrun changed & subscription & kataconfig should too
subscription.catalogSourceName = testrun.catalogSourceName
subscription.channel = testrun.channel
kataconfig.eligibility = testrun.eligibility
kataconfig.runtimeClassName = testrun.runtimeClassName
kataconfig.enablePeerPods = testrun.enablePeerPods
}
return configmapExists, nil
} | kata | ||||
function | openshift/openshift-tests-private | 9f50a371-eafa-4d5a-a473-688f6a825026 | getUpgradeCatalogConfigMap | ['"fmt"', '"strings"'] | ['UpgradeCatalogDescription'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func getUpgradeCatalogConfigMap(oc *exutil.CLI, upgradeCatalog *UpgradeCatalogDescription) (err error) {
upgradeCatalog.exists = false
// need a checkResourceExists that doesn't fail when not found.
configMaps, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", upgradeCatalog.namespace, "-o=jsonpath={.items..metadata.name}").Output()
if err != nil {
err = fmt.Errorf("cannot get configmaps in ns %v: Configmaps=[%v] Error:%w", upgradeCatalog.namespace, configMaps, err)
upgradeCatalog.exists = true // override skip if there is an error
return err
}
if strings.Contains(configMaps, upgradeCatalog.name) {
upgradeCatalog.exists = true
}
if !upgradeCatalog.exists { // no cm is not error
return nil
}
upgradeCatalog.imageAfter, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", upgradeCatalog.namespace, upgradeCatalog.name, "-o=jsonpath={.data.imageAfter}").Output()
if err != nil || upgradeCatalog.imageAfter == "" {
err = fmt.Errorf("The %v configmap is missing the imageAfter: %v %v", upgradeCatalog.name, upgradeCatalog.imageAfter, err)
return err
}
upgradeCatalog.imageBefore, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("catsrc", "-n", "openshift-marketplace", upgradeCatalog.catalogName, "-o=jsonpath={.spec.image}").Output()
if err != nil {
err = fmt.Errorf("Could not get the current image from the %v catsrc %v %v", upgradeCatalog.catalogName, upgradeCatalog.imageBefore, err)
return err
}
return nil
} | kata | |||
function | openshift/openshift-tests-private | 8fa08621-f28d-4bc4-bdcb-508857dccd3d | changeCatalogImage | ['"fmt"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func changeCatalogImage(oc *exutil.CLI, catalogName, catalogImage string) (err error) {
patch := fmt.Sprintf("{\"spec\":{\"image\":\"%v\"}}", catalogImage)
msg, err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("catsrc", catalogName, "--type", "merge", "-p", patch, "-n", "openshift-marketplace").Output()
if err != nil {
err = fmt.Errorf("Could not patch %v %v %v", catalogName, msg, err)
return err
}
msg, err = oc.AsAdmin().Run("get").Args("catsrc", catalogName, "-n", "openshift-marketplace", "-o=jsonpath={.spec.image}").Output()
if err != nil || msg != catalogImage {
err = fmt.Errorf("Catalog patch did not change image to %v %v %v", catalogImage, msg, err)
return err
}
waitForCatalogReadyOrFail(oc, catalogName)
return nil
} | kata | ||||
function | openshift/openshift-tests-private | ff344026-c532-4d5d-9a80-907938e50f42 | waitForCatalogReadyOrFail | ['"time"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func waitForCatalogReadyOrFail(oc *exutil.CLI, catalogName string) {
_, _ = checkResourceJsonpath(oc, "catsrc", catalogName, "openshift-marketplace", "-o=jsonpath={.status.connectionState.lastObservedState}", "READY", 300*time.Second, 10*time.Second)
} | kata | ||||
function | openshift/openshift-tests-private | eb91bf3f-4692-466f-bcdc-868aa2d2244d | checkResourceJsonPathChanged | ['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func checkResourceJsonPathChanged(oc *exutil.CLI, resType, resName, resNs, jsonpath, currentValue string, duration, interval time.Duration) (newValue string, err error) {
// watch a resource that has a known value until it changes. Return the new value
errCheck := wait.PollImmediate(interval, duration, func() (bool, error) {
newValue, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(resType, resName, "-n", resNs, jsonpath).Output()
if newValue != currentValue && err == nil {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("%v %v in ns %v is not in %v state after %v sec: %v %v", resType, resName, resNs, currentValue, duration, newValue, err))
return newValue, nil
} | kata | ||||
function | openshift/openshift-tests-private | 89b83ffe-7e66-4e8c-b864-a084838c1e68 | waitForPodsToTerminate | ['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func waitForPodsToTerminate(oc *exutil.CLI, namespace, listOfPods string) {
var (
podStillRunning bool
currentPods string
)
errCheck := wait.PollImmediate(10*time.Second, snooze*time.Second, func() (bool, error) {
podStillRunning = false
currentPods, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, "-o=jsonpath={.items..metadata.name}").Output()
for _, pod := range strings.Fields(listOfPods) {
if strings.Contains(currentPods, pod) {
podStillRunning = true
break
}
}
if podStillRunning {
return false, nil
}
return true, nil
})
currentPods, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, "-o=jsonpath={.items..metadata.name}").Output()
exutil.AssertWaitPollNoErr(errCheck, fmt.Sprintf("Timeout waiting for a (%v) pods to terminate. Current pods %v running", listOfPods, currentPods))
} | kata | ||||
function | openshift/openshift-tests-private | f196d20d-84bb-4195-92a0-bdffba86b163 | patchPodvmEnableGPU | ['"fmt"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func patchPodvmEnableGPU(oc *exutil.CLI, opNamespace, cmName, enableGpu string) {
patchGPU := "{\"data\":{\"ENABLE_NVIDIA_GPU\":\"" + enableGpu + "\"}}"
msg, err := oc.AsAdmin().Run("patch").Args("configmap", cmName, "-n",
opNamespace, "--type", "merge", "--patch", patchGPU).Output()
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Could not patch ENABLE_NVIDIA_GPU to %v\n error: %v %v", enableGpu, msg, err))
currentGPU := getPodvmEnableGPU(oc, opNamespace, cmName)
o.Expect(currentGPU).To(o.Equal(enableGpu))
} | kata |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.