element_type | project_name | uuid | name | imports | structs | interfaces | file_location | code | global_vars | package | tags
---|---|---|---|---|---|---|---|---|---|---|---|
test case | openshift/openshift-tests-private | 42118513-3de0-4ae9-876b-4bf88ea4f93a | NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:kcui-High-28845-Hive give a way to override the API URL of managed cluster[Serial] | ['"fmt"', '"path/filepath"', '"strings"', '"time"', '"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/aws/aws-sdk-go-v2/config"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_aws.go | g.It("NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:kcui-High-28845-Hive give a way to override the API URL of managed cluster[Serial]", func() {
testCaseID := "28845"
cdName := "cluster-" + testCaseID + "-" + getRandomString()[:ClusterSuffixLen]
oc.SetupProject()
exutil.By("Config cd Install-Config Secret...")
installConfigSecret := installConfig{
name1: cdName + "-install-config",
namespace: oc.Namespace(),
baseDomain: AWSBaseDomain,
name2: cdName,
region: AWSRegion,
template: filepath.Join(testDataDir, "aws-install-config.yaml"),
}
exutil.By("Config ClusterDeployment...")
cluster := clusterDeployment{
fake: "false",
name: cdName,
namespace: oc.Namespace(),
baseDomain: AWSBaseDomain,
clusterName: cdName,
platformType: "aws",
credRef: AWSCreds,
region: AWSRegion,
imageSetRef: cdName + "-imageset",
installConfigSecret: cdName + "-install-config",
pullSecretRef: PullSecret,
installAttemptsLimit: 1,
template: filepath.Join(testDataDir, "clusterdeployment.yaml"),
}
defer cleanCD(oc, cluster.name+"-imageset", oc.Namespace(), installConfigSecret.name1, cluster.name)
createCD(testDataDir, testOCPImage, oc, oc.Namespace(), installConfigSecret, cluster)
exutil.By("Check install status...")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "true", ok, ClusterInstallTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.spec.installed}"}).check(oc)
exutil.By("edit the cd CRs apiURLOverride field with a vaild apiURL")
ValidApiUrl := "https://api." + cdName + ".qe.devcluster.openshift.com:6443"
stdout, _, err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("cd", cdName, "-n", oc.Namespace(), "--type=merge", "-p", fmt.Sprintf("{\"spec\":{\"controlPlaneConfig\":{\"apiURLOverride\": \"%s\"}}}", ValidApiUrl)).Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(stdout).To(o.ContainSubstring("clusterdeployment.hive.openshift.io/" + cdName + " patched"))
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "True", ok, DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath='{.status.conditions[?(@.type == \"ActiveAPIURLOverride\")].status}'"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "ClusterReachable", ok, DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath='{.status.conditions[?(@.type == \"ActiveAPIURLOverride\")].reason}'"}).check(oc)
exutil.By("edit the cd CRs apiURLOverride field with an invaild apiURL")
InvalidApiUrl := "https://api." + cdName + "-non-exist.qe.devcluster.openshift.com:6443"
stdout, _, err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("cd", cdName, "-n", oc.Namespace(), "--type=merge", "-p", fmt.Sprintf("{\"spec\":{\"controlPlaneConfig\":{\"apiURLOverride\": \"%s\"}}}", InvalidApiUrl)).Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(stdout).To(o.ContainSubstring("clusterdeployment.hive.openshift.io/" + cdName + " patched"))
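// With an unreachable API URL, the ActiveAPIURLOverride condition is expected to flip to False with
// reason ErrorConnectingToCluster; the closure below polls the condition until all fields match.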
waitForAPIWaitFailure := func() bool {
condition := getCondition(oc, "ClusterDeployment", cdName, oc.Namespace(), "ActiveAPIURLOverride")
if status, ok := condition["status"]; !ok || status != "False" {
e2e.Logf("For condition ActiveAPIURLOverride, expected status is False, actual status is %v, retrying ...", status)
return false
}
if reason, ok := condition["reason"]; !ok || reason != "ErrorConnectingToCluster" {
e2e.Logf("For condition ActiveAPIURLOverride, expected reason is ErrorConnectingToCluster, actual reason is %v, retrying ...", reason)
return false
}
if message, ok := condition["message"]; !ok || !strings.Contains(message, "no such host") {
e2e.Logf("For condition ActiveAPIURLOverride, expected message is no such host, actual reason is %v, retrying ...", message)
return false
}
e2e.Logf("For condition ActiveAPIURLOverride, fields status, reason & message all expected, proceeding to the next step ...")
return true
}
o.Eventually(waitForAPIWaitFailure).WithTimeout(DefaultTimeout * time.Second).WithPolling(3 * time.Second).Should(o.BeTrue())
exutil.By("edit the cd CRs apiURLOverride field with a vaild apiURL again")
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, fmt.Sprintf("clusterdeployment.hive.openshift.io/"+cdName+" patched"), ok, DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "--type", "merge", "-p", fmt.Sprintf("{\"spec\":{\"controlPlaneConfig\":{\"apiURLOverride\": \"%s\"}}}", ValidApiUrl)}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "True", ok, DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath='{.status.conditions[?(@.type == \"ActiveAPIURLOverride\")].status}'"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "ClusterReachable", ok, DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath='{.status.conditions[?(@.type == \"ActiveAPIURLOverride\")].reason}'"}).check(oc)
}) | |||||
test case | openshift/openshift-tests-private | 62c82a2a-28d9-45c7-bc10-c242b6ececa0 | Author:kcui-NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Medium-32007-Hive can prevent cluster deletion accidentally via a set on hiveconfig[Serial] | ['"encoding/json"', '"fmt"', '"path/filepath"', '"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/aws/aws-sdk-go-v2/config"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_aws.go | g.It("Author:kcui-NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Medium-32007-Hive can prevent cluster deletion accidentally via a set on hiveconfig[Serial]", func() {
exutil.By("Add \"deleteProtection: enabled\" in hiveconfig.spec")
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("hiveconfig", "hive", "--type=json", "-p", `[{"op":"remove", "path": "/spec/deleteProtection"}]`).Execute()
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("hiveconfig/hive", "--type", `merge`, `--patch={"spec": {"deleteProtection": "enabled"}}`).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
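// With deleteProtection enabled in HiveConfig, Hive is expected to add the
// hive.openshift.io/protected-delete: "true" annotation to installed CDs, blocking accidental deletion;
// cd2 below pre-sets the annotation to "false" to verify Hive does not overwrite an existing value.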
exutil.By("Check modifying is successful")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "enabled", ok, DefaultTimeout, []string{"hiveconfig", "hive", "-o=jsonpath={.spec.deleteProtection}"}).check(oc)
testCaseID := "32007"
cdName1 := "cluster-" + testCaseID + "-" + getRandomString()[:ClusterSuffixLen]
cdName2 := "cluster-" + testCaseID + "-" + getRandomString()[:ClusterSuffixLen]
oc.SetupProject()
exutil.By("Config cd1 Install-Config Secret...")
installConfigSecret := installConfig{
name1: cdName1 + "-install-config",
namespace: oc.Namespace(),
baseDomain: AWSBaseDomain,
name2: cdName1,
region: AWSRegion,
template: filepath.Join(testDataDir, "aws-install-config.yaml"),
}
exutil.By("Config ClusterDeployment1...")
clusterImageSetName1 := cdName1 + "-imageset"
cluster1 := clusterDeployment{
fake: "true",
name: cdName1,
namespace: oc.Namespace(),
baseDomain: AWSBaseDomain,
clusterName: cdName1,
platformType: "aws",
credRef: AWSCreds,
region: AWSRegion,
imageSetRef: clusterImageSetName1,
installConfigSecret: cdName1 + "-install-config",
pullSecretRef: PullSecret,
installAttemptsLimit: 3,
template: filepath.Join(testDataDir, "clusterdeployment.yaml"),
}
defer cleanCD(oc, cluster1.name+"-imageset", oc.Namespace(), installConfigSecret.name1, cluster1.name)
createCD(testDataDir, testOCPImage, oc, oc.Namespace(), installConfigSecret, cluster1)
exutil.By("Creating cd2 install-config Secret ...")
installConfigSecretName := cdName2 + "-install-config"
installConfigSecret = installConfig{
name1: installConfigSecretName,
namespace: oc.Namespace(),
baseDomain: AWSBaseDomain,
name2: cdName2,
region: AWSRegion,
template: filepath.Join(testDataDir, "aws-install-config.yaml"),
}
defer cleanupObjects(oc, objectTableRef{"Secret", oc.Namespace(), installConfigSecretName})
installConfigSecret.create(oc)
exutil.By("Creating cd2 ClusterImageSet")
clusterImageSetName2 := cdName2 + "-imageset"
imageSet := clusterImageSet{
name: clusterImageSetName2,
releaseImage: testOCPImage,
template: filepath.Join(testDataDir, "clusterimageset.yaml"),
}
defer cleanupObjects(oc, objectTableRef{"ClusterImageSet", "", clusterImageSetName2})
imageSet.create(oc)
exutil.By("Creating cd2")
cluster2 := clusterDeployment{
fake: "true",
name: cdName2,
namespace: oc.Namespace(),
baseDomain: AWSBaseDomain,
clusterName: cdName2,
platformType: "aws",
credRef: AWSCreds,
region: AWSRegion,
imageSetRef: clusterImageSetName2,
installConfigSecret: installConfigSecretName,
pullSecretRef: PullSecret,
template: filepath.Join(testDataDir, "clusterdeployment.yaml"),
installAttemptsLimit: 3,
}
defer cleanupObjects(oc, objectTableRef{"ClusterDeployment", oc.Namespace(), cdName2})
cluster2.create(oc)
exutil.By("Add annotations hive.openshift.io/protected-delete: \"false\" in cd2 CRs")
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, fmt.Sprintf("clusterdeployment.hive.openshift.io/"+cdName2+" patched"), ok, DefaultTimeout, []string{"ClusterDeployment", cdName2, "-n", oc.Namespace(), "--type", "merge", "-p", "{\"metadata\":{\"annotations\":{\"hive.openshift.io/protected-delete\": \"false\"}}}"}).check(oc)
exutil.By("Check Hive add the \"hive.openshift.io/protected-delete\" annotation to cd1 after installation")
e2e.Logf("Check cd1 is installed.")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "true", ok, FakeClusterInstallTimeout, []string{"ClusterDeployment", cdName1, "-n", oc.Namespace(), "-o=jsonpath={.spec.installed}"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "true", ok, DefaultTimeout, []string{"ClusterDeployment", cdName1, "-n", oc.Namespace(), "-o=jsonpath='{.metadata.annotations.hive\\.openshift\\.io/protected-delete}'"}).check(oc)
exutil.By("delete cd1 will failed")
_, stderr, err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("ClusterDeployment", cdName1, "-n", oc.Namespace()).Outputs()
o.Expect(err).To(o.HaveOccurred())
o.Expect(stderr).To(o.ContainSubstring("metadata.annotations.hive.openshift.io/protected-delete: Invalid value: \"true\": cannot delete while annotation is present"))
exutil.By("edit hive.openshift.io/protected-delete: to \"false\" in cd1")
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, fmt.Sprintf("clusterdeployment.hive.openshift.io/"+cdName1+" patched"), ok, DefaultTimeout, []string{"ClusterDeployment", cdName1, "-n", oc.Namespace(), "--type", "merge", "-p", "{\"metadata\":{\"annotations\":{\"hive.openshift.io/protected-delete\": \"false\"}}}"}).check(oc)
exutil.By("delete cd1 again and success")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("ClusterDeployment", cdName1, "-n", oc.Namespace()).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Check cd1 has been deleted.")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, cdName1, nok, FakeClusterInstallTimeout, []string{"ClusterDeployment", "-n", oc.Namespace()}).check(oc)
exutil.By("Check Hive didn't rewrite the \"hive.openshift.io/protected-delete\" annotation to cd2 after installation")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "false", ok, DefaultTimeout, []string{"ClusterDeployment", cdName2, "-n", oc.Namespace(), "-o=jsonpath='{.metadata.annotations.hive\\.openshift\\.io/protected-delete}'"}).check(oc)
exutil.By("delete cd2 success")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("ClusterDeployment", cdName2, "-n", oc.Namespace()).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Check cd2 has been deleted.")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, cdName2, nok, FakeClusterInstallTimeout, []string{"ClusterDeployment", "-n", oc.Namespace()}).check(oc)
}) | |||||
test case | openshift/openshift-tests-private | 77eb4b2c-c3b9-4685-b5ef-0e3b828192b7 | NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:kcui-High-29907-Hive handles owner references after Velero restore[Serial] | ['"encoding/json"', '"path/filepath"', '"time"', '"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/aws/aws-sdk-go-v2/config"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_aws.go | g.It("NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:kcui-High-29907-Hive handles owner references after Velero restore[Serial]", func() {
testCaseID := "29907"
cdName := "cluster-" + testCaseID + "-" + getRandomString()[:ClusterSuffixLen]
oc.SetupProject()
exutil.By("Config Install-Config Secret...")
installConfigSecret := installConfig{
name1: cdName + "-install-config",
namespace: oc.Namespace(),
baseDomain: cdName + "." + AWSBaseDomain,
name2: cdName,
region: AWSRegion,
template: filepath.Join(testDataDir, "aws-install-config.yaml"),
}
exutil.By("Create Route53-aws-creds in hive namespace")
createRoute53AWSCreds(oc, oc.Namespace())
exutil.By("Config ClusterDeployment...")
cluster := clusterDeployment{
fake: "true",
name: cdName,
namespace: oc.Namespace(),
baseDomain: cdName + "." + AWSBaseDomain,
clusterName: cdName,
manageDNS: true,
platformType: "aws",
credRef: AWSCreds,
region: AWSRegion,
imageSetRef: cdName + "-imageset",
installConfigSecret: cdName + "-install-config",
pullSecretRef: PullSecret,
template: filepath.Join(testDataDir, "clusterdeployment.yaml"),
installAttemptsLimit: 3,
}
defer cleanCD(oc, cluster.name+"-imageset", oc.Namespace(), installConfigSecret.name1, cluster.name)
createCD(testDataDir, testOCPImage, oc, oc.Namespace(), installConfigSecret, cluster)
e2e.Logf("Check dnszone has been created.")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, cdName+"-zone", ok, DefaultTimeout, []string{"dnszone", "-n", oc.Namespace()}).check(oc)
exutil.By("check and record the messages of .metadata.ownerReferences1 and .metadata.resourceVersion1")
stdout, _, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("dnszone", cdName+"-zone", "-n", oc.Namespace(), "-o=jsonpath={.metadata.ownerReferences[0]}").Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
var ownerReferences1 map[string]any
err = json.Unmarshal([]byte(stdout), &ownerReferences1)
o.Expect(err).NotTo(o.HaveOccurred())
resourceVersion1, _, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("dnszone", cdName+"-zone", "-n", oc.Namespace(), "-o=jsonpath={.metadata.resourceVersion}").Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("delete ownerReferences of the dnszone")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("dnszone", cdName+"-zone", "-n", oc.Namespace(), "--type=json", "-p", `[{"op":"remove", "path": "/metadata/ownerReferences"}]`).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
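// After the ownerReference is removed (simulating what a Velero restore drops), Hive is expected to
// re-add an equivalent ownerReference pointing at the ClusterDeployment; the reads below capture it for comparison.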
exutil.By("check and record the messages of .metadata.ownerReferences2 and .metadata.resourceVersion2")
stdout, _, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("dnszone", cdName+"-zone", "-n", oc.Namespace(), "-o=jsonpath={.metadata.ownerReferences[0]}").Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
var ownerReferences2 map[string]any
err = json.Unmarshal([]byte(stdout), &ownerReferences2)
o.Expect(err).NotTo(o.HaveOccurred())
resourceVersion2, _, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("dnszone", cdName+"-zone", "-n", oc.Namespace(), "-o=jsonpath={.metadata.resourceVersion}").Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("check the .metadata.ownerReferences is the same as before and the .metadata.resourceVersion is different")
CheckSameOrNot := func() bool {
if ownerReferences1["apiVersion"] == "" || ownerReferences1["blockOwnerDeletion"] != true || ownerReferences1["controller"] != true ||
ownerReferences1["kind"] != "ClusterDeployment" || ownerReferences1["name"] != cdName || ownerReferences1["uid"] == "" || resourceVersion1 == "" {
e2e.Logf("messages of ownerReferences1 or resourceVersion1 is wrong")
return false
}
if ownerReferences2["apiVersion"] == "" || ownerReferences2["blockOwnerDeletion"] != true || ownerReferences2["controller"] != true ||
ownerReferences2["kind"] != "ClusterDeployment" || ownerReferences2["name"] != cdName || ownerReferences2["uid"] == "" || resourceVersion2 == "" {
e2e.Logf("messages of ownerReferences2 or resourceVersion2 is wrong")
return false
}
if ownerReferences1["apiVersion"] != ownerReferences2["apiVersion"] || ownerReferences1["uid"] != ownerReferences2["uid"] || resourceVersion1 == resourceVersion2 {
e2e.Logf("ownerReferences1 or resourceVersion1 doesn't match the ownerReferences2 or resourceVersion2")
return false
}
return true
}
o.Eventually(CheckSameOrNot).WithTimeout(15 * time.Second).WithPolling(3 * time.Second).Should(o.BeTrue())
}) | |||||
test case | openshift/openshift-tests-private | 3c9fc238-e410-4468-b103-e52373ee9699 | NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:kcui-Medium-30089-Hive components will be teared down when HiveConfig is deleted[Disruptive] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_aws.go | g.It("NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:kcui-Medium-30089-Hive components will be teared down when HiveConfig is deleted[Disruptive]", func() {
exutil.By("Check the hive-controllers and hiveadmission are running")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "hive-controllers", ok, DefaultTimeout, []string{"pods", "-n", "hive"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "hiveadmission", ok, DefaultTimeout, []string{"pods", "-n", "hive"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "hive-controllers", ok, DefaultTimeout, []string{"deployment", "-n", "hive"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "hiveadmission", ok, DefaultTimeout, []string{"deployment", "-n", "hive"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "hive-controllers", ok, DefaultTimeout, []string{"svc", "-n", "hive"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "hiveadmission", ok, DefaultTimeout, []string{"svc", "-n", "hive"}).check(oc)
exutil.By("Delete hiveconfig")
newCheck("expect", "delete", asAdmin, withoutNamespace, contain, "hiveconfig.hive.openshift.io \"hive\" deleted", ok, DefaultTimeout, []string{"hiveconfig", "hive"}).check(oc)
exutil.By("Check hive-controllers and hiveadmission were teared down or deleted")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "hive-controllers", nok, DefaultTimeout, []string{"pods", "-n", "hive"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "hiveadmission", nok, DefaultTimeout, []string{"pods", "-n", "hive"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "hive-controllers", nok, DefaultTimeout, []string{"deployment", "-n", "hive"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "hiveadmission", nok, DefaultTimeout, []string{"deployment", "-n", "hive"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "hive-controllers", nok, DefaultTimeout, []string{"svc", "-n", "hive"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "hiveadmission", nok, DefaultTimeout, []string{"svc", "-n", "hive"}).check(oc)
exutil.By("Create the hive resources again")
hc.createIfNotExist(oc)
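// Re-creating the HiveConfig should cause hive-operator to redeploy hive-controllers and hiveadmission,
// which the checks below verify.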
exutil.By("Check the resources again")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "hive-controllers", ok, DefaultTimeout, []string{"pods", "-n", "hive"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "hiveadmission", ok, DefaultTimeout, []string{"pods", "-n", "hive"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "hive-controllers", ok, DefaultTimeout, []string{"deployment", "-n", "hive"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "hiveadmission", ok, DefaultTimeout, []string{"deployment", "-n", "hive"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "hive-controllers", ok, DefaultTimeout, []string{"svc", "-n", "hive"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "hiveadmission", ok, DefaultTimeout, []string{"svc", "-n", "hive"}).check(oc)
}) | ||||||
test case | openshift/openshift-tests-private | 5ac73f2d-6373-45e9-a7b2-979062fd1018 | NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:kcui-Medium-35209-[HiveSpec] Allow setting lifetime for claims[Serial] | ['"fmt"', '"math"', '"path/filepath"', '"strconv"', '"strings"', '"time"', '"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/aws/aws-sdk-go-v2/credentials"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_aws.go | g.It("NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:kcui-Medium-35209-[HiveSpec] Allow setting lifetime for claims[Serial]", func() {
testCaseID := "35209"
poolName := "pool-" + testCaseID
imageSetName := poolName + "-imageset"
imageSetTemp := filepath.Join(testDataDir, "clusterimageset.yaml")
imageSet := clusterImageSet{
name: imageSetName,
releaseImage: testOCPImage,
template: imageSetTemp,
}
exutil.By("Create ClusterImageSet...")
defer cleanupObjects(oc, objectTableRef{"ClusterImageSet", "", imageSetName})
imageSet.create(oc)
exutil.By("Check if ClusterImageSet was created successfully")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, imageSetName, ok, DefaultTimeout, []string{"ClusterImageSet"}).check(oc)
oc.SetupProject()
//secrets can only be accessed by pods in the same namespace, so copy the pull-secret and aws-creds into the pool's target namespace
exutil.By("Copy AWS platform credentials...")
createAWSCreds(oc, oc.Namespace())
exutil.By("Copy pull-secret...")
createPullSecret(oc, oc.Namespace())
exutil.By("Create ClusterPool...")
poolTemp := filepath.Join(testDataDir, "clusterpool.yaml")
pool := clusterPool{
name: poolName,
namespace: oc.Namespace(),
fake: "true",
baseDomain: AWSBaseDomain,
imageSetRef: imageSetName,
platformType: "aws",
credRef: AWSCreds,
region: AWSRegion,
pullSecretRef: PullSecret,
size: 4,
maxSize: 4,
runningCount: 4,
maxConcurrent: 4,
hibernateAfter: "360m",
template: poolTemp,
}
defer cleanupObjects(oc, objectTableRef{"ClusterPool", oc.Namespace(), poolName})
pool.create(oc)
//the lifetime set for 4 claims initially
lifetimeMinuteInitials := []int{4, 8, 12, 20}
e2e.Logf("lifetimeMinuteInitials[] of four claims are %vm %vm(==default) %vm %vm(>maximum)", lifetimeMinuteInitials[0], lifetimeMinuteInitials[1], lifetimeMinuteInitials[2], lifetimeMinuteInitials[3])
defaultLifetimeMinute := 8
maximumLifetimeMinute := 16
e2e.Logf("defaultLifetimeMinute is %vm, maximumLifetimeMinute is %vm", defaultLifetimeMinute, maximumLifetimeMinute)
exutil.By("Add claimLifetime field (default and maximum) in .spec of clusterpool CR...")
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"ClusterPool", poolName, "-n", oc.Namespace(), "--type", "merge", "-p", fmt.Sprintf("{\"spec\":{\"claimLifetime\":{\"default\": \"%dm\"}}}", defaultLifetimeMinute)}).check(oc)
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"ClusterPool", poolName, "-n", oc.Namespace(), "--type", "merge", "-p", fmt.Sprintf("{\"spec\":{\"claimLifetime\":{\"maximum\": \"%dm\"}}}", maximumLifetimeMinute)}).check(oc)
exutil.By("Check if ClusterPool has already existed")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, poolName, ok, DefaultTimeout, []string{"ClusterPool", "-n", oc.Namespace()}).check(oc)
exutil.By("Create 4 clusterclaims named claim1 & claim2 & claim3 & claim4 with different .spec.lifetime from lifetimeMinuteInitials[]")
for claimIndex, lifetimeMinuteInitial := range lifetimeMinuteInitials {
exutil.By("Create a clusterclaim named claim" + strconv.Itoa(claimIndex+1))
claimTemp := filepath.Join(testDataDir, "clusterclaim.yaml")
claimName := poolName + "-claim" + strconv.Itoa(claimIndex+1)
claim := clusterClaim{
name: claimName,
namespace: oc.Namespace(),
clusterPoolName: poolName,
template: claimTemp,
}
defer cleanupObjects(oc, objectTableRef{"ClusterClaim", oc.Namespace(), claimName})
claim.create(oc)
exutil.By("patch claim" + strconv.Itoa(claimIndex+1) + " with spec.lifetime=" + strconv.Itoa(lifetimeMinuteInitial) + "m")
e2e.Logf("patch the lifetime if it not equals to defaultLifetimeMinute")
//if the .spec.lifetime is nil and default is not nil, it will be auto-filled by default lifetime
if lifetimeMinuteInitial != defaultLifetimeMinute {
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"clusterclaim", claimName, "-n", oc.Namespace(), "--type", "merge", "-p", fmt.Sprintf("{\"spec\":{\"lifetime\": \"%dm\"}}", lifetimeMinuteInitial)}).check(oc)
}
exutil.By("check the lifetime if it equals to lifetimeMinuteInitial[] or default or maximum lifetime")
//if the lifetimeMinuteSet > maximumLifetimeMinute, the liftime will be maximumLifetimeMinute, not the lifetimeMinuteSet
lifetimeMinuteFinal := int(math.Min(float64(lifetimeMinuteInitial), float64(maximumLifetimeMinute)))
newCheck("expect", "get", asAdmin, withoutNamespace, contain, fmt.Sprintf("%dm", lifetimeMinuteFinal), ok, DefaultTimeout, []string{"clusterclaim", claimName, "-n", oc.Namespace(), "-o=jsonpath={.status.lifetime}"}).check(oc)
}
//allowable time error, in seconds
timeThreshold := 30.0
//index of the claim expected to expire next; values 0-4 are valid
timeoutClaimName := 0
//check each claim's status at successive points in time
checkClaimStatus := func() bool {
//there are 4 claims in total; determine which ones should still exist
if timeoutClaimName < 4 {
exutil.By(fmt.Sprintf("claims %d-4 should still exist; verify by checking they have no deletionTimestamp", timeoutClaimName+1))
for claimNo := 4; claimNo > timeoutClaimName; claimNo-- {
claimName := poolName + "-claim" + strconv.Itoa(claimNo)
stdout, _, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterclaim", claimName, "-n", oc.Namespace(), "-o=jsonpath={.metadata.deletionTimestamp}").Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
//no deletionTimestamp means this claim still exist
o.Expect(stdout).To(o.Equal(""))
}
} else {
exutil.By("all claim should not exist, no need to check which claim still alive")
}
//there is no claim be end of life, return directly
if timeoutClaimName == 0 {
e2e.Logf("all claims exist, no need to check which claim disappears")
timeoutClaimName++
return true
}
//the claim at index timeoutClaimName should be deleted around this time
exutil.By(fmt.Sprintf("check that claims 1-%d no longer exist or are being deleted; only claim%v needs to be checked", timeoutClaimName, timeoutClaimName))
claimName := poolName + "-claim" + strconv.Itoa(timeoutClaimName)
//check if the claim has already been deleted
stdout, _, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterclaim", "-n", oc.Namespace()).Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
//if the claim has been deleted, return directly
if !strings.Contains(stdout, claimName) {
e2e.Logf("the claim%d has been deleted, waiting for checking claim%d", timeoutClaimName, timeoutClaimName+1)
timeoutClaimName++
return true
}
//record creationTimestamp
stdout, _, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterclaim", claimName, "-n", oc.Namespace(), "-o=jsonpath={.metadata.creationTimestamp}").Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
creationTime, err := time.Parse(time.RFC3339, stdout)
o.Expect(err).NotTo(o.HaveOccurred())
//record deletionTimestamp
stdout, _, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterclaim", claimName, "-n", oc.Namespace(), "-o=jsonpath={.metadata.deletionTimestamp}").Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(stdout).NotTo(o.Equal(""))
deletionTime, err := time.Parse(time.RFC3339, stdout)
o.Expect(err).NotTo(o.HaveOccurred())
//calculate the effective lifetime for this claim
lifetimeMinuteFinal := int(math.Min(float64(lifetimeMinuteInitials[timeoutClaimName-1]), float64(maximumLifetimeMinute)))
//calculate the time error, and it should be less than the allowable time error set
gapTime := deletionTime.Sub(creationTime.Add(time.Duration(lifetimeMinuteFinal) * time.Minute))
o.Expect(math.Abs(gapTime.Seconds()) < timeThreshold).To(o.BeTrue())
timeoutClaimName++
return true
}
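// Poll at the shortest initial lifetime so each iteration observes the next claim reaching end of life,
// and keep polling slightly longer than the maximum lifetime so every claim's expiry is covered.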
exutil.By("check the claim status on timeline")
o.Consistently(checkClaimStatus).WithTimeout(time.Duration(maximumLifetimeMinute+1) * time.Minute).WithPolling(time.Duration(lifetimeMinuteInitials[0]) * time.Minute).Should(o.BeTrue())
}) | |||||
test case | openshift/openshift-tests-private | dd2c0d07-5fb5-4c1e-ae88-42e99be9c74e | NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:kcui-Medium-34148-[HiveSpec] Hive supports spot instances in machine pools[Serial] | ['"context"', '"os"', '"path/filepath"', '"strconv"', '"strings"', '"time"', '"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/aws/aws-sdk-go-v2/config"', 'cloudFormationTypes "github.com/aws/aws-sdk-go-v2/service/cloudformation/types"', '"github.com/aws/aws-sdk-go-v2/service/ec2"', '"github.com/aws/aws-sdk-go-v2/service/ec2/types"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_aws.go | g.It("NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:kcui-Medium-34148-[HiveSpec] Hive supports spot instances in machine pools[Serial]", func() {
testCaseID := "34148"
cdName := "cluster-" + testCaseID + "-" + getRandomString()[:ClusterSuffixLen]
oc.SetupProject()
exutil.By("Config cd1 Install-Config Secret...")
installConfigSecret := installConfig{
name1: cdName + "-install-config",
namespace: oc.Namespace(),
baseDomain: AWSBaseDomain,
name2: cdName,
region: AWSRegion,
template: filepath.Join(testDataDir, "aws-install-config.yaml"),
}
exutil.By("Config ClusterDeployment...")
clusterImageSetName := cdName + "-imageset"
cluster := clusterDeployment{
fake: "false",
name: cdName,
namespace: oc.Namespace(),
baseDomain: AWSBaseDomain,
clusterName: cdName,
platformType: "aws",
credRef: AWSCreds,
region: AWSRegion,
imageSetRef: clusterImageSetName,
installConfigSecret: cdName + "-install-config",
pullSecretRef: PullSecret,
installAttemptsLimit: 1,
template: filepath.Join(testDataDir, "clusterdeployment.yaml"),
}
defer cleanCD(oc, cluster.name+"-imageset", oc.Namespace(), installConfigSecret.name1, cluster.name)
createCD(testDataDir, testOCPImage, oc, oc.Namespace(), installConfigSecret, cluster)
exutil.By("Check Aws ClusterDeployment installed flag is true")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "true", ok, ClusterInstallTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.spec.installed}"}).check(oc)
e2e.Logf("Create tmp directory")
tmpDir := "/tmp/" + cdName + "-" + getRandomString()
defer os.RemoveAll(tmpDir)
err := os.MkdirAll(tmpDir, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create spots instances")
replicasCount := 2
machinepoolName := cdName + "-spot"
spotMachinepoolYaml := `
apiVersion: hive.openshift.io/v1
kind: MachinePool
metadata:
  name: ` + machinepoolName + `
  namespace: ` + oc.Namespace() + `
spec:
  clusterDeploymentRef:
    name: ` + cdName + `
  name: spot
  platform:
    aws:
      rootVolume:
        iops: 100
        size: 22
        type: gp2
      type: m4.xlarge
      spotMarketOptions: {}
  replicas: ` + strconv.Itoa(replicasCount)
var filename = tmpDir + "/" + testCaseID + "-machinepool-spot.yaml"
err = os.WriteFile(filename, []byte(spotMachinepoolYaml), 0644)
o.Expect(err).NotTo(o.HaveOccurred())
defer cleanupObjects(oc, objectTableRef{"MachinePool", oc.Namespace(), machinepoolName})
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", filename).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
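// Hive should sync this MachinePool to the spoke cluster as MachineSets backed by EC2 Spot instances;
// the corresponding Spot Requests are verified below via the EC2 DescribeSpotInstanceRequests API.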
exutil.By("Login to target cluster, check macheine & machineset are created on openshift-machine-api namespace.")
e2e.Logf("Extracting kubeconfig ...")
getClusterKubeconfig(oc, cdName, oc.Namespace(), tmpDir)
kubeconfig := tmpDir + "/kubeconfig"
e2e.Logf("Checking the spotMachine number equals to replicas number: %v", replicasCount)
var instanceIds []string
checkSpotMachineCount := func() bool {
instanceIds = getMachinePoolInstancesIds(oc, "spot", kubeconfig)
e2e.Logf("spotMachineCount: %v", len(instanceIds))
return len(instanceIds) == replicasCount
}
o.Eventually(checkSpotMachineCount).WithTimeout(5 * time.Minute).WithPolling(10 * time.Second).Should(o.BeTrue())
e2e.Logf("SpotMachine Instance IDs have been found")
e2e.Logf("Checking the spotMachineset ready number equals to replicas number: %v", replicasCount)
checkSpotMachinesetReadyCount := func() bool {
SpotMachinesetReadyCount := 0
stdout, _, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("--kubeconfig="+kubeconfig, "machineset", "-n", "openshift-machine-api", "-o=jsonpath={.items[*].metadata.name}").Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
for _, spotMachinesetName := range strings.Split(stdout, " ") {
if strings.Contains(spotMachinesetName, "spot-") {
stdout, _, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("--kubeconfig="+kubeconfig, "machineset", spotMachinesetName, "-n", "openshift-machine-api", "-o=jsonpath={.status.replicas}").Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
tmpNumber, err := strconv.Atoi(stdout)
o.Expect(err).NotTo(o.HaveOccurred())
SpotMachinesetReadyCount += tmpNumber
}
}
e2e.Logf("spotMachinesetReadyCount: %v", SpotMachinesetReadyCount)
return SpotMachinesetReadyCount == replicasCount
}
o.Eventually(checkSpotMachinesetReadyCount).WithTimeout(5 * time.Minute).WithPolling(10 * time.Second).Should(o.BeTrue())
exutil.By("login to aws console, check there will add 2 Spot Requests in ec2 \"Spot Requests\" list")
// Get AWS client
cfg := getAWSConfig(oc, AWSRegion)
ec2Client := ec2.NewFromConfig(cfg)
waitUntilSpotInstanceRequestsCreated := func() bool {
var describeSpotInstanceRequestsOutput *ec2.DescribeSpotInstanceRequestsOutput
describeSpotInstanceRequestsOutput, err = ec2Client.DescribeSpotInstanceRequests(context.Background(), &ec2.DescribeSpotInstanceRequestsInput{
Filters: []types.Filter{
{
Name: aws.String("instance-id"),
Values: instanceIds,
},
},
})
return err == nil && len(describeSpotInstanceRequestsOutput.SpotInstanceRequests) == 2
}
o.Eventually(waitUntilSpotInstanceRequestsCreated).WithTimeout(5 * time.Minute).WithPolling(10 * time.Second).Should(o.BeTrue())
exutil.By("Delete the machinepool")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("machinepool", machinepoolName, "-n", oc.Namespace()).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Checking the spotMachines disappear")
checkSpotMachineCount = func() bool {
stdout, _, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("--kubeconfig="+kubeconfig, "machine", "-n", "openshift-machine-api", "-o=jsonpath={.items[*].metadata.name}").Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
return strings.Count(stdout, "-spot-") == 0
}
o.Eventually(checkSpotMachineCount).WithTimeout(5 * time.Minute).WithPolling(10 * time.Second).Should(o.BeTrue())
exutil.By("Checking the spotMachineset ready number is 0")
checkSpotMachinesetReadyCount = func() bool {
stdout, _, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("--kubeconfig="+kubeconfig, "machineset", "-n", "openshift-machine-api", "-o=jsonpath={.items[*].metadata.name}").Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
return strings.Count(stdout, "-spot-") == 0
}
o.Eventually(checkSpotMachinesetReadyCount).WithTimeout(5 * time.Minute).WithPolling(10 * time.Second).Should(o.BeTrue())
}) | |||||
test case | openshift/openshift-tests-private | 16cb788a-a009-4ac9-8c5f-534e0e439d24 | NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:kcui-High-62158-ClusterPool deletion should wait until all unclaimed clusters are destroyed - Case 1[Serial] | ['"path/filepath"', '"strings"', '"time"', '"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/aws/aws-sdk-go-v2/credentials"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_aws.go | g.It("NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:kcui-High-62158-ClusterPool deletion should wait until all unclaimed clusters are destroyed - Case 1[Serial]", func() {
testCaseID := "62158"
poolName := "pool-" + testCaseID
imageSetName := poolName + "-imageset"
imageSetTemp := filepath.Join(testDataDir, "clusterimageset.yaml")
imageSet := clusterImageSet{
name: imageSetName,
releaseImage: testOCPImage,
template: imageSetTemp,
}
exutil.By("Create ClusterImageSet...")
defer cleanupObjects(oc, objectTableRef{"ClusterImageSet", "", imageSetName})
imageSet.create(oc)
e2e.Logf("Check if ClusterImageSet was created successfully")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, imageSetName, ok, DefaultTimeout, []string{"ClusterImageSet"}).check(oc)
oc.SetupProject()
exutil.By("Copy AWS platform credentials...")
createAWSCreds(oc, oc.Namespace())
exutil.By("Copy pull-secret...")
createPullSecret(oc, oc.Namespace())
exutil.By("Create ClusterPool...")
poolTemp := filepath.Join(testDataDir, "clusterpool.yaml")
pool := clusterPool{
name: poolName,
namespace: oc.Namespace(),
fake: "true",
baseDomain: AWSBaseDomain,
imageSetRef: imageSetName,
platformType: "aws",
credRef: AWSCreds,
region: AWSRegion,
pullSecretRef: PullSecret,
size: 2,
maxSize: 2,
runningCount: 0,
maxConcurrent: 2,
hibernateAfter: "360m",
template: poolTemp,
}
defer cleanupObjects(oc, objectTableRef{"ClusterPool", oc.Namespace(), poolName})
pool.create(oc)
e2e.Logf("Check if ClusterPool created successfully and become ready")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "2", ok, FakeClusterInstallTimeout, []string{"ClusterPool", poolName, "-n", oc.Namespace(), "-o=jsonpath={.status.standby}"}).check(oc)
exutil.By("Delete 1 CD, then one CD should be provisioning and another one CD should be provisioned")
cdNames := strings.Split(strings.Trim(getCDlistfromPool(oc, poolName), "\n"), "\n")
cmd, _, _, _ := oc.AsAdmin().WithoutNamespace().Run("delete").Args("ClusterDeployment", cdNames[0], "-n", cdNames[0]).Background()
defer cmd.Process.Kill()
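// Deleting one unclaimed CD in the background forces the pool to replace it: one CD should be
// Provisioning while the other stays Provisioned, which is what checkCDStatus below waits for.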
e2e.Logf("Checking the two CDs status...")
checkCDStatus := func() bool {
isProvisioned := 0
isProvisioning := 0
for _, cdName := range strings.Split(strings.Trim(getCDlistfromPool(oc, poolName), "\n"), "\n") {
stdout, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ClusterDeployment", "-n", cdName).Output()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(stdout, "Provisioned") {
isProvisioned++
}
if strings.Contains(stdout, "Provisioning") {
isProvisioning++
}
}
e2e.Logf("%v CD is Provisioned and %v CD is Provisioning", isProvisioned, isProvisioning)
return isProvisioned == 1 && isProvisioning == 1
}
o.Eventually(checkCDStatus).WithTimeout(300 * time.Second).WithPolling(5 * time.Second).Should(o.BeTrue())
exutil.By("Delete the ClusterPool, ClusterPool will be not deleted until both CDs are deleted")
stdout, err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("ClusterPool", poolName, "-n", oc.Namespace()).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(stdout).To(o.ContainSubstring("deleted"))
e2e.Logf("Check if all ClusterDeployments have been deleted")
o.Expect(getCDlistfromPool(oc, poolName)).To(o.Equal(""))
}) | |||||
test case | openshift/openshift-tests-private | 4b40a061-2c9b-4479-9e3e-d198bf07237c | NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:mihuang-High-69203-Add annotation to override installer image name. [Serial] | ['"os"', '"path/filepath"', '"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/aws/aws-sdk-go-v2/config"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_aws.go | g.It("NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:mihuang-High-69203-Add annotation to override installer image name. [Serial]", func() {
testCaseID := "69203"
cdName := "cluster-" + testCaseID + "-" + getRandomString()[:ClusterSuffixLen]
oc.SetupProject()
exutil.By("config Install-Config Secret...")
installConfigSecret := installConfig{
name1: cdName + "-install-config",
namespace: oc.Namespace(),
baseDomain: AWSBaseDomain,
name2: cdName,
region: AWSRegion,
template: filepath.Join(testDataDir, "aws-install-config.yaml"),
}
exutil.By("Config ClusterDeployment...")
installerType := "installer-altinfra"
clusterImageSetName := cdName + "-imageset"
cluster := clusterDeployment{
fake: "false",
installerType: installerType,
name: cdName,
namespace: oc.Namespace(),
baseDomain: AWSBaseDomain,
clusterName: cdName,
platformType: "aws",
credRef: AWSCreds,
region: AWSRegion,
imageSetRef: clusterImageSetName,
installConfigSecret: cdName + "-install-config",
pullSecretRef: PullSecret,
installAttemptsLimit: 1,
template: filepath.Join(testDataDir, "clusterdeployment.yaml"),
}
defer cleanCD(oc, cluster.name+"-imageset", oc.Namespace(), installConfigSecret.name1, cluster.name)
createCD(testDataDir, testOCPImage, oc, oc.Namespace(), installConfigSecret, cluster)
exutil.By("Check ClusterDeployment installed pod is running")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "Running", ok, DefaultTimeout, []string{"pods", "-n", oc.Namespace(), "-l", "hive.openshift.io/job-type=provision", "-o=jsonpath={.items[*].status.phase}"}).check(oc)
exutil.By("Check the image used is the version specified.")
secretFile, secretErr := getPullSecret(oc)
defer os.Remove(secretFile)
o.Expect(secretErr).NotTo(o.HaveOccurred())
installerImage, err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("release", "info", "--image-for="+installerType, testOCPImage, "--registry-config="+secretFile).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("testInstallerImage: %v", installerImage)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, installerImage, ok, DefaultTimeout, []string{"clusterdeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.status.installerImage}"}).check(oc)
}) | |||||
test case | openshift/openshift-tests-private | ccc2875d-5403-40e4-9451-e358aa18561a | Author:mihuang-NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-High-40825-[HiveSDRosa] Support AWS AssumeRole credentials cluster. [Disruptive] | ['"context"', '"encoding/json"', '"fmt"', '"os"', '"os/exec"', '"path/filepath"', '"strings"', '"time"', '"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/aws/aws-sdk-go-v2/config"', '"github.com/aws/aws-sdk-go-v2/credentials"', '"github.com/aws/aws-sdk-go-v2/service/iam"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_aws.go | g.It("Author:mihuang-NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-High-40825-[HiveSDRosa] Support AWS AssumeRole credentials cluster. [Disruptive]", func() {
testCaseID := "40825"
cdName := "cluster-" + testCaseID + "-" + getRandomString()[:ClusterSuffixLen]
hiveUserName := "hive_40825user"
hiveRoleName := "hive_40825role"
customerRoleName := "hive_40825csrole"
uuid := "abfgsheb765"
exutil.By("Prepare the AWS Assume Role needed for the test")
dirname := "/tmp/" + oc.Namespace() + "-" + testCaseID
err := os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
defer os.RemoveAll(dirname)
e2e.Logf("Check if the user and roles exist")
cfg := getAWSConfig(oc, AWSRegion)
iamClient := iam.NewFromConfig(cfg)
_, err = iamClient.GetUser(context.Background(), &iam.GetUserInput{
UserName: aws.String(hiveUserName),
})
o.Expect(err).To(o.HaveOccurred())
roleNameList := []string{hiveRoleName, customerRoleName}
for _, roleName := range roleNameList {
_, err = iamClient.GetRole(context.Background(), &iam.GetRoleInput{
RoleName: aws.String(roleName),
})
o.Expect(err).To(o.HaveOccurred())
}
e2e.Logf("Create the user for hive testing")
defer func() {
_, err = iamClient.DeleteUser(context.Background(), &iam.DeleteUserInput{
UserName: aws.String(hiveUserName),
})
o.Expect(err).NotTo(o.HaveOccurred(), "failed to delete the user")
}()
createUserOutput, err := iamClient.CreateUser(context.Background(), &iam.CreateUserInput{
UserName: aws.String(hiveUserName),
})
o.Expect(err).NotTo(o.HaveOccurred())
hiveUserARN := createUserOutput.User.Arn
e2e.Logf("the user is successfully created: %v, the hiveUserArn is: %v", *createUserOutput.User.UserName, *hiveUserARN)
e2e.Logf("Assign the policy to the user")
_, err = iamClient.AttachUserPolicy(context.Background(), &iam.AttachUserPolicyInput{
PolicyArn: aws.String("arn:aws:iam::aws:policy/AdministratorAccess"),
UserName: aws.String(hiveUserName),
})
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
attachedPolicyOutput, err := iamClient.ListAttachedUserPolicies(context.Background(), &iam.ListAttachedUserPoliciesInput{
UserName: aws.String(hiveUserName),
})
o.Expect(err).NotTo(o.HaveOccurred(), "failed to list the attached policies")
for _, policy := range attachedPolicyOutput.AttachedPolicies {
_, err = iamClient.DetachUserPolicy(context.Background(), &iam.DetachUserPolicyInput{
PolicyArn: policy.PolicyArn,
UserName: aws.String(hiveUserName),
})
if err != nil {
e2e.Logf("failed to detach the policy: %v", err)
}
}
}()
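// The sleep presumably gives IAM's eventually consistent policy attachment time to propagate
// before creating the role that trusts this user.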
time.Sleep(1 * time.Minute)
e2e.Logf("Create the role for hive user")
policyDocument, err := createAssumeRolePolicyDocument(*hiveUserARN, "")
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("the policyDocument is: %v", policyDocument)
defer func() {
_, err = iamClient.DeleteRole(context.Background(), &iam.DeleteRoleInput{
RoleName: aws.String(hiveRoleName),
})
o.Expect(err).NotTo(o.HaveOccurred(), "failed to delete the hive role")
}()
createRoleOutput, err := iamClient.CreateRole(context.Background(), &iam.CreateRoleInput{
RoleName: aws.String(hiveRoleName),
AssumeRolePolicyDocument: aws.String(string(policyDocument)),
})
o.Expect(err).NotTo(o.HaveOccurred())
hiveRoleArn := createRoleOutput.Role.Arn
e2e.Logf("successfully created the role for hive testing: %v, hiveRoleArn is: %v", *createRoleOutput.Role.RoleName, *hiveRoleArn)
e2e.Logf(("Assign the policy to the role"))
_, err = iamClient.AttachRolePolicy(context.Background(), &iam.AttachRolePolicyInput{
PolicyArn: aws.String("arn:aws:iam::aws:policy/AdministratorAccess"),
RoleName: aws.String(hiveRoleName),
})
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
attachedPolicies, err := iamClient.ListAttachedRolePolicies(context.Background(), &iam.ListAttachedRolePoliciesInput{
RoleName: aws.String(hiveRoleName),
})
o.Expect(err).NotTo(o.HaveOccurred(), "failed to list the attached policies")
for _, policy := range attachedPolicies.AttachedPolicies {
_, err = iamClient.DetachRolePolicy(context.Background(), &iam.DetachRolePolicyInput{
PolicyArn: policy.PolicyArn,
RoleName: aws.String(hiveRoleName),
})
if err != nil {
e2e.Logf("failed to detach the policy: %v", err)
}
}
}()
time.Sleep(1 * time.Minute)
e2e.Logf("Create the customer role for hive role to assume")
customerRolePolicyDocument, err := createAssumeRolePolicyDocument(*hiveRoleArn, uuid)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("the customerRolePolicyDocument is: %v", customerRolePolicyDocument)
defer func() {
_, err := iamClient.DeleteRole(context.Background(), &iam.DeleteRoleInput{
RoleName: aws.String(customerRoleName),
})
o.Expect(err).NotTo(o.HaveOccurred(), "failed to delete the customer role")
}()
createCustomerRoleOutput, err := iamClient.CreateRole(context.Background(), &iam.CreateRoleInput{
RoleName: aws.String(customerRoleName),
AssumeRolePolicyDocument: aws.String(string(customerRolePolicyDocument)),
})
o.Expect(err).NotTo(o.HaveOccurred())
customerRoleArn := createCustomerRoleOutput.Role.Arn
e2e.Logf("the created customer rolev %v for hive testing, role arn is: %v", *createCustomerRoleOutput.Role.RoleName, *customerRoleArn)
e2e.Logf("Attach the customer role to the hive role")
_, err = iamClient.AttachRolePolicy(context.Background(), &iam.AttachRolePolicyInput{
PolicyArn: aws.String("arn:aws:iam::aws:policy/AdministratorAccess"),
RoleName: aws.String(customerRoleName),
})
o.Expect(err).NotTo(o.HaveOccurred())
defer func() {
attachedPolicies, err := iamClient.ListAttachedRolePolicies(context.Background(), &iam.ListAttachedRolePoliciesInput{
RoleName: aws.String(customerRoleName),
})
o.Expect(err).NotTo(o.HaveOccurred(), "failed to list the attached policies")
for _, policy := range attachedPolicies.AttachedPolicies {
_, err = iamClient.DetachRolePolicy(context.Background(), &iam.DetachRolePolicyInput{
PolicyArn: policy.PolicyArn,
RoleName: aws.String(customerRoleName),
})
if err != nil {
e2e.Logf("failed to detach the policy: %v", err)
}
}
}()
e2e.Logf("Create access key for hive user")
iamCredsOutput, err := iamClient.CreateAccessKey(context.Background(), &iam.CreateAccessKeyInput{
UserName: aws.String(hiveUserName),
})
o.Expect(err).NotTo(o.HaveOccurred())
awsAccessKeyId := iamCredsOutput.AccessKey.AccessKeyId
awsSecretAccessKey := iamCredsOutput.AccessKey.SecretAccessKey
defer func() {
_, err = iamClient.DeleteAccessKey(context.Background(), &iam.DeleteAccessKeyInput{
AccessKeyId: aws.String(*awsAccessKeyId),
UserName: aws.String(hiveUserName),
})
o.Expect(err).NotTo(o.HaveOccurred())
}()
e2e.Logf("Create aws-service-provider-config")
awsServiceProviderConfig := fmt.Sprintf(`
[default]
aws_access_key_id = %s
aws_secret_access_key = %s
role_arn = %s
`, *awsAccessKeyId, *awsSecretAccessKey, *hiveRoleArn)
awsServiceProviderConfigFile := filepath.Join(dirname, "aws-service-provider-config")
err = os.WriteFile(awsServiceProviderConfigFile, []byte(awsServiceProviderConfig), 0644)
o.Expect(err).NotTo(o.HaveOccurred())
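// This AWS shared-config profile lets Hive assume hiveRoleArn using the user's static keys;
// the customer role (with its external ID) is referenced separately from the ClusterDeployment below.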
e2e.Logf("Create aws-service-provider-secret to target namespace")
awsServiceProviderSecret := "aws-service-provider-secret"
defer oc.AsAdmin().Run("delete").Args("secret", awsServiceProviderSecret, "-n", HiveNamespace).Execute()
_, err = oc.AsAdmin().Run("create").Args("secret", "generic", awsServiceProviderSecret, "-n", HiveNamespace, "--from-file=aws_config="+awsServiceProviderConfigFile).Output()
o.Expect(err).NotTo(o.HaveOccurred())
secretOutput, err := oc.AsAdmin().Run("get").Args("secret", awsServiceProviderSecret, "-n", HiveNamespace).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(secretOutput).To(o.ContainSubstring(awsServiceProviderSecret))
e2e.Logf("Update HiveConfig to use the AWS Service Provider secret")
defer func() {
e2e.Logf("Restoring serviceProviderCredentialsConfig in HiveConfig")
restorePatch := `[{"op": "remove", "path": "/spec/serviceProviderCredentialsConfig"}]`
_, err := oc.AsAdmin().Run("patch").Args("hiveconfig", "hive", "--type", "json", "-p", restorePatch).Output()
o.Expect(err).NotTo(o.HaveOccurred())
}()
patchCmd := fmt.Sprintf("[{\"op\":\"replace\",\"path\":\"/spec/serviceProviderCredentialsConfig\",\"value\":{\"aws\":{\"credentialsSecretRef\":{\"name\":\"%s\"}}}}]", awsServiceProviderSecret)
_, err = oc.AsAdmin().Run("patch").Args(
"hiveconfig",
"hive",
"--type=json",
"-p="+patchCmd,
).Output()
o.Expect(err).NotTo(o.HaveOccurred())
secretRefOutput, err := oc.AsAdmin().Run("get").Args("hiveconfig", "hive", "-o=jsonpath={.spec.serviceProviderCredentialsConfig.aws.credentialsSecretRef.name}").Output()
e2e.Logf("secretRefOutput: %v", secretRefOutput)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(secretRefOutput).To(o.Equal("aws-service-provider-secret"))
exutil.By("Extract the ccoctl to create STS resources")
ccoctlTarget := "ccoctl"
ccoctlPath := exutil.ExtractCcoctl(oc, testOCPImage, ccoctlTarget)
defer os.Remove(filepath.Dir(ccoctlPath))
credsDir := filepath.Join(dirname, "creds")
e2e.Logf("Extract the credentials requests")
pullSecretFile, pullSecretErr := getPullSecret(oc)
o.Expect(pullSecretErr).NotTo(o.HaveOccurred())
defer os.Remove(pullSecretFile)
credsOutput, err := oc.AsAdmin().Run("adm").Args("release", "extract", testOCPImage, "--credentials-requests", "--cloud=aws", "--registry-config", pullSecretFile, "--to="+credsDir).Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("credsOutput: %v", credsOutput)
e2e.Logf("Create STS resources")
ccoctlOutputDir := filepath.Join(dirname, "_output")
defer func() {
e2e.Logf("Delete the STS resources")
deleteManifestsOutput, err := exec.Command("bash", "-c", fmt.Sprintf("%s aws delete --name %s --region %s", ccoctlPath, cdName, AWSRegion)).CombinedOutput()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("deleteManifestsOutput: %v", string(deleteManifestsOutput))
}()
createManifestsOutput, err := exec.Command("bash", "-c", fmt.Sprintf("%s aws create-all --name %s --region %s --credentials-requests-dir %s --output-dir %s", ccoctlPath, cdName, AWSRegion, credsDir, ccoctlOutputDir)).CombinedOutput()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("createManifestsOutput: %v", string(createManifestsOutput))
exutil.By("Create a Secret for your private service account signing key created with ccoctl aws create-all above.")
privateSAKeyName := "bound-service-account-signing-key"
defer func() {
e2e.Logf("Delete the Secret for your private service account signing key")
_, err := oc.AsAdmin().Run("delete").Args("secret", privateSAKeyName, "-n", oc.Namespace()).Output()
o.Expect(err).NotTo(o.HaveOccurred())
}()
_, err = oc.AsAdmin().Run("create").Args("secret", "generic", "-n", oc.Namespace(), privateSAKeyName, "--from-file=bound-service-account-signing-key.key="+filepath.Join(ccoctlOutputDir, "serviceaccount-signer.private")).Output()
o.Expect(err).NotTo(o.HaveOccurred())
boundServiceAccountSigningKeyOutput, err := oc.AsAdmin().Run("get").Args("secret", privateSAKeyName, "-n", oc.Namespace()).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(boundServiceAccountSigningKeyOutput).To(o.ContainSubstring(privateSAKeyName))
exutil.By("Create a Secret for installer manifests (credential role Secrets, Authentication config)")
manifestsSecretName := "cluster-manifests"
defer func() {
e2e.Logf("Delete the Secret for installer manifests")
_, err := oc.AsAdmin().Run("delete").Args("secret", manifestsSecretName, "-n", oc.Namespace()).Output()
o.Expect(err).NotTo(o.HaveOccurred())
}()
_, err = oc.AsAdmin().Run("create").Args("secret", "generic", manifestsSecretName, "-n", oc.Namespace(), "--from-file="+filepath.Join(ccoctlOutputDir, "manifests")).Output()
o.Expect(err).NotTo(o.HaveOccurred())
clusterManifestsOutput, err := oc.AsAdmin().Run("get").Args("secret", manifestsSecretName, "-n", oc.Namespace()).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(clusterManifestsOutput).To(o.ContainSubstring(manifestsSecretName))
exutil.By("Creating ClusterImageSet")
clusterImageSetName := cdName + "-imageset"
imageSet := clusterImageSet{
name: clusterImageSetName,
releaseImage: testOCPImage,
template: filepath.Join(testDataDir, "clusterimageset.yaml"),
}
defer cleanupObjects(oc, objectTableRef{"ClusterImageSet", "", clusterImageSetName})
imageSet.create(oc)
exutil.By("Creating install-config Secret")
installConfigSecretName := cdName + "-install-config"
installConfigSecret := installConfig{
name1: cdName + "-install-config",
namespace: oc.Namespace(),
baseDomain: AWSBaseDomain,
name2: cdName,
region: AWSRegion,
credentialsMode: "Manual",
template: filepath.Join(testDataDir, "aws-install-config.yaml"),
}
defer cleanupObjects(oc, objectTableRef{"Secret", oc.Namespace(), installConfigSecretName})
installConfigSecret.create(oc)
exutil.By("Copying pull secret")
createPullSecret(oc, oc.Namespace())
exutil.By("Creating ClusterDeployment")
clusterDeployment := clusterDeploymentAssumeRole{
fake: "false",
name: cdName,
namespace: oc.Namespace(),
baseDomain: AWSBaseDomain,
boundServiceAccountSigningKeySecretRef: privateSAKeyName,
clusterName: cdName,
platformType: "aws",
roleARN: *customerRoleArn,
externalID: uuid,
region: AWSRegion,
manifestsSecretRef: manifestsSecretName,
imageSetRef: cdName + "-imageset",
installConfigSecret: cdName + "-install-config",
pullSecretRef: PullSecret,
installAttemptsLimit: 1,
template: filepath.Join(testDataDir, "clusterdeployment-aws-assumerole.yaml"),
}
defer cleanupObjects(oc, objectTableRef{"ClusterDeployment", oc.Namespace(), cdName})
clusterDeployment.create(oc)
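// With roleARN and externalID set, Hive provisions by chaining credentials: the user's static keys
// assume the hive role (from the service-provider secret), which in turn assumes the customer role
// named in the CD spec.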
exutil.By("Create worker MachinePool ...")
workermachinepoolAWSTemp := filepath.Join(testDataDir, "machinepool-worker-aws.yaml")
workermp := machinepool{
namespace: oc.Namespace(),
clusterName: cdName,
template: workermachinepoolAWSTemp,
}
defer cleanupObjects(oc,
objectTableRef{"MachinePool", oc.Namespace(), cdName + "-worker"},
)
workermp.create(oc)
exutil.By("Check ClusterDeployment installed pod is running")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "Running", ok, 3*DefaultTimeout, []string{"pods", "-n", oc.Namespace(), "-l", "hive.openshift.io/job-type=provision", "-o=jsonpath={.items[*].status.phase}"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "true", ok, ClusterInstallTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.spec.installed}"}).check(oc)
exutil.By("Check the worker machinepool replicas number equals to 3")
getClusterKubeconfig(oc, cdName, oc.Namespace(), dirname)
e2e.Logf("Check worker machinepool .status.replicas = 3")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "3", ok, DefaultTimeout, []string{"MachinePool", cdName + "-worker", "-n", oc.Namespace(), "-o=jsonpath={.status.replicas}"}).check(oc)
exutil.By("Patch machinepool static replicas to autoscaler")
autoScalingMax := "2"
autoScalingMin := "0"
removeConfig := "[{\"op\": \"remove\", \"path\": \"/spec/replicas\"}]"
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"MachinePool", cdName + "-worker", "-n", oc.Namespace(), "--type", "json", "-p", removeConfig}).check(oc)
autoscalConfig := fmt.Sprintf("{\"spec\": {\"autoscaling\": {\"maxReplicas\": %s, \"minReplicas\": %s}}}", autoScalingMax, autoScalingMin)
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"MachinePool", cdName + "-worker", "-n", oc.Namespace(), "--type", "merge", "-p", autoscalConfig}).check(oc)
e2e.Logf("Login to spoke cluster, check the MachineAutoscaler should be created")
kubeconfig := getClusterKubeconfig(oc, cdName, oc.Namespace(), dirname)
o.Eventually(func() bool {
machineAutoscalerNamesList, err := oc.AsAdmin().Run("get").Args("MachineAutoscaler", "-n", "openshift-machine-api", "-o", "jsonpath={.items[*].metadata.name}", "--kubeconfig="+kubeconfig).Output()
o.Expect(err).NotTo(o.HaveOccurred())
machineAutoscalerNames := strings.Fields(machineAutoscalerNamesList)
for _, machineAutoscalerName := range machineAutoscalerNames {
machineAutoscaler, _, err := oc.AsAdmin().Run("get").Args("MachineAutoscaler", machineAutoscalerName, "-n", "openshift-machine-api", "-o", "jsonpath={.spec.maxReplicas} { .spec.minReplicas}", "--kubeconfig="+kubeconfig).Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
machineAutoscalerMax := "1"
machineAutoscalerMin := "0"
o.Expect(machineAutoscaler).To(o.Equal(machineAutoscalerMax + " " + machineAutoscalerMin))
}
return true
}).WithTimeout(2*time.Minute).WithPolling(10*time.Second).Should(o.BeTrue(), "MachineAutoscaler successfully created")
exutil.By("Patch machinepool autoscaler to static replicas")
removeConfig2 := "[{\"op\": \"remove\", \"path\": \"/spec/autoscaling\"}]"
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"MachinePool", cdName + "-worker", "-n", oc.Namespace(), "--type", "json", "-p", removeConfig2}).check(oc)
recoverConfig := "{\"spec\": {\"replicas\": 2}}"
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"MachinePool", cdName + "-worker", "-n", oc.Namespace(), "--type", "merge", "-p", recoverConfig}).check(oc)
e2e.Logf("Login to spoke cluster, check the MachineAutoscaler should be deleted")
o.Eventually(func() bool {
machineAutoscalerOutput, err := oc.AsAdmin().Run("get").Args("MachineAutoscaler", "-n", "openshift-machine-api", "-o", "jsonpath={.items[*].metadata.name}", "--kubeconfig="+kubeconfig).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(machineAutoscalerOutput).To(o.BeEmpty())
return true
}).WithTimeout(2*time.Minute).WithPolling(10*time.Second).Should(o.BeTrue(), "MachineAutoscaler successfully deleted")
e2e.Logf("Check the machinepool replicas number equals to 2")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "2", ok, DefaultTimeout, []string{"MachinePool", cdName + "-worker", "-n", oc.Namespace(), "-o=jsonpath={.status.replicas}"}).check(oc)
}) | |||||
test case | openshift/openshift-tests-private | e5956198-aa94-49db-b8be-7d7c85c87bcc | Author:mihuang-NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-High-78024-[HiveSpec] Support install cluster with ovn ipv4 subnet configured. [Serial] | ['"fmt"', '"net"', '"os"', '"path/filepath"', '"regexp"', '"strings"', '"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/aws/aws-sdk-go-v2/config"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_aws.go | g.It("Author:mihuang-NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-High-78024-[HiveSpec] Support install cluster with ovn ipv4 subnet configured. [Serial]", func() {
testCaseID := "78024"
cdName := "cluster-" + testCaseID + "-" + getRandomString()[:ClusterSuffixLen]
exutil.By("Config Install-Config Secret...")
ipv4InternalJoinSubnet := "101.64.0.0/16"
installConfigSecret := installConfig{
name1: cdName + "-install-config",
namespace: oc.Namespace(),
baseDomain: AWSBaseDomain,
name2: cdName,
region: AWSRegion,
internalJoinSubnet: ipv4InternalJoinSubnet,
template: filepath.Join(testDataDir, "aws-install-config.yaml"),
}
exutil.By("Config ClusterDeployment...")
cluster := clusterDeployment{
fake: "false",
name: cdName,
namespace: oc.Namespace(),
baseDomain: AWSBaseDomain,
clusterName: cdName,
platformType: "aws",
credRef: AWSCreds,
region: AWSRegion,
imageSetRef: cdName + "-imageset",
installConfigSecret: cdName + "-install-config",
pullSecretRef: PullSecret,
installAttemptsLimit: 1,
template: filepath.Join(testDataDir, "clusterdeployment.yaml"),
}
defer cleanCD(oc, cluster.name+"-imageset", oc.Namespace(), installConfigSecret.name1, cluster.name)
createCD(testDataDir, testOCPImage, oc, oc.Namespace(), installConfigSecret, cluster)
exutil.By("Check AWS ClusterDeployment installed flag is true")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "true", ok, ClusterInstallTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.spec.installed}"}).check(oc)
exutil.By("Log in to the spoke cluster, check that the OVN IPv4 internal subnet is correctly configured in the running cluster.")
e2e.Logf("Extracting kubeconfig ...")
tmpDir := "/tmp/" + cdName + "-" + getRandomString()
defer os.RemoveAll(tmpDir)
err := os.MkdirAll(tmpDir, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
kubeconfigFilePath := getClusterKubeconfig(oc, cdName, oc.Namespace(), tmpDir)
ovninternalJoinSubnet, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("--kubeconfig="+kubeconfigFilePath, "networks.operator.openshift.io", "cluster", "-o", "jsonpath={.spec.defaultNetwork.ovnKubernetesConfig.ipv4.internalJoinSubnet}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(ovninternalJoinSubnet).To(o.Equal(ipv4InternalJoinSubnet))
exutil.By("Verify whether the routing interface (LRP) of each node is using an IPv4 address.")
nodes, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("--kubeconfig="+kubeconfigFilePath, "nodes", "-o", "jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
nodeList := strings.Split(nodes, " ")
for _, node := range nodeList {
nodeGatewayRouterLrpIfaddrs, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("--kubeconfig="+kubeconfigFilePath, "node", node, "-o", "jsonpath={.metadata.annotations.k8s\\.ovn\\.org/node-gateway-router-lrp-ifaddrs}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeGatewayRouterLrpIfaddrs).To(o.ContainSubstring("ipv4"))
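// The annotation value is JSON along the lines of {"default":{"ipv4":"101.64.0.2/16"}} (illustrative value); extract the IPv4 CIDR and confirm the address falls inside the overridden join subnet.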
re := regexp.MustCompile(`"ipv4":"([0-9\.\/]+)"`)
match := re.FindStringSubmatch(nodeGatewayRouterLrpIfaddrs)
o.Expect(len(match)).To(o.BeNumerically(">", 1))
ipWithCIDR := match[1]
ip := strings.Split(ipWithCIDR, "/")[0]
e2e.Logf("Node %s has gateway router LRP interface with IPv4 address %s", node, ip)
_, ipv4InternalJoinSubnetNet, err := net.ParseCIDR(ipv4InternalJoinSubnet)
o.Expect(err).NotTo(o.HaveOccurred())
nodeIP := net.ParseIP(ip)
o.Expect(nodeIP).NotTo(o.BeNil())
o.Expect(ipv4InternalJoinSubnetNet.Contains(nodeIP)).To(o.BeTrue(), fmt.Sprintf("Routing interface (LRP) of node %s has IPv4 address %s, but it is not in the expected subnet %s", node, ip, ipv4InternalJoinSubnet))
}
}) | |||||
test case | openshift/openshift-tests-private | 52395bf1-4838-40d5-bb2c-312e93bb827b | Author:mihuang-NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-High-79046-[HiveSDRosa] AWS Non-CAPI CD install and Day2 infra MachinePool test. [Serial] | ['"os"', '"path/filepath"', '"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/aws/aws-sdk-go-v2/config"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_aws.go | g.It("Author:mihuang-NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-High-79046-[HiveSDRosa] AWS Non-CAPI CD install and Day2 infra MachinePool test. [Serial]", func() {
testCaseID := "79046"
cdName := "cluster-" + testCaseID + "-" + getRandomString()[:ClusterSuffixLen]
testImageVersion := "4.15"
testNonCAPIOCPImage, err := exutil.GetLatestNightlyImage(testImageVersion)
o.Expect(err).NotTo(o.HaveOccurred())
if testNonCAPIOCPImage == "" {
e2e.Failf("Failed to get image for version %v", testImageVersion)
}
exutil.By("Config Install-Config Secret...")
installConfigSecret := installConfig{
name1: cdName + "-install-config",
namespace: oc.Namespace(),
baseDomain: AWSBaseDomain,
name2: cdName,
region: AWSRegion,
template: filepath.Join(testDataDir, "aws-install-config.yaml"),
}
exutil.By("Config ClusterDeployment...")
cluster := clusterDeployment{
fake: "false",
name: cdName,
namespace: oc.Namespace(),
baseDomain: AWSBaseDomain,
clusterName: cdName,
platformType: "aws",
credRef: AWSCreds,
region: AWSRegion,
imageSetRef: cdName + "-imageset",
installConfigSecret: cdName + "-install-config",
pullSecretRef: PullSecret,
installAttemptsLimit: 1,
template: filepath.Join(testDataDir, "clusterdeployment.yaml"),
}
defer cleanCD(oc, cluster.name+"-imageset", oc.Namespace(), installConfigSecret.name1, cluster.name)
createCD(testDataDir, testNonCAPIOCPImage, oc, oc.Namespace(), installConfigSecret, cluster)
exutil.By("Create infra MachinePool ...")
inframachinepoolAWSTemp := filepath.Join(testDataDir, "machinepool-infra-aws.yaml")
inframp := machinepool{
namespace: oc.Namespace(),
clusterName: cdName,
template: inframachinepoolAWSTemp,
}
defer cleanupObjects(oc, objectTableRef{"MachinePool", oc.Namespace(), cdName + "-infra"})
inframp.create(oc)
exutil.By("Check if ClusterDeployment created successfully and become Provisioned")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "true", ok, ClusterInstallTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.spec.installed}"}).check(oc)
exutil.By("Get the kubeconfig of the cluster")
tmpDir := "/tmp/" + cdName + "-" + getRandomString()
err = os.MkdirAll(tmpDir, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
defer os.RemoveAll(tmpDir)
getClusterKubeconfig(oc, cdName, oc.Namespace(), tmpDir)
kubeconfig := tmpDir + "/kubeconfig"
e2e.Logf("Check infra machinepool .status.replicas = 1 ")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "1", ok, DefaultTimeout, []string{"MachinePool", cdName + "-infra", "-n", oc.Namespace(), "-o=jsonpath={.status.replicas}"}).check(oc)
machinesetsname := getResource(oc, asAdmin, withoutNamespace, "MachinePool", cdName+"-infra", "-n", oc.Namespace(), "-o=jsonpath={.status.machineSets[?(@.replicas==1)].name}")
o.Expect(machinesetsname).NotTo(o.BeEmpty())
e2e.Logf("Remote cluster machineset list: %s", machinesetsname)
e2e.Logf("Check machineset %s created on remote cluster", machinesetsname)
newCheck("expect", "get", asAdmin, withoutNamespace, compare, machinesetsname, ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "MachineSet", "-n", "openshift-machine-api", "-l", "hive.openshift.io/machine-pool=infra", "-o=jsonpath={.items[?(@.spec.replicas==1)].metadata.name}"}).check(oc)
e2e.Logf("Check only 1 machineset up")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "1", ok, 5*DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "MachineSet", "-n", "openshift-machine-api", "-l", "hive.openshift.io/machine-pool=infra", "-o=jsonpath={.items[?(@.spec.replicas==1)].status.availableReplicas}"}).check(oc)
e2e.Logf("Check only one machines in Running status")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "Running", ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "Machine", "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machine-role=infra", "-o=jsonpath={.items[*].status.phase}"}).check(oc)
e2e.Logf("Patch infra machinepool .spec.replicas to 3")
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"MachinePool", cdName + "-infra", "-n", oc.Namespace(), "--type", "merge", "-p", `{"spec":{"replicas": 3}}`}).check(oc)
machinesetsname = getResource(oc, asAdmin, withoutNamespace, "MachinePool", cdName+"-infra", "-n", oc.Namespace(), "-o=jsonpath={.status.machineSets[?(@.replicas==1)].name}")
o.Expect(machinesetsname).NotTo(o.BeEmpty())
e2e.Logf("Remote cluster machineset list: %s", machinesetsname)
e2e.Logf("Check machineset %s created on remote cluster", machinesetsname)
newCheck("expect", "get", asAdmin, withoutNamespace, compare, machinesetsname, ok, 5*DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "MachineSet", "-n", "openshift-machine-api", "-l", "hive.openshift.io/machine-pool=infra", "-o=jsonpath={.items[?(@.spec.replicas==1)].metadata.name}"}).check(oc)
e2e.Logf("Check machinesets scale up to 3")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "1 1 1", ok, 5*DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "MachineSet", "-n", "openshift-machine-api", "-l", "hive.openshift.io/machine-pool=infra", "-o=jsonpath={.items[?(@.spec.replicas==1)].status.availableReplicas}"}).check(oc)
e2e.Logf("Check 3 machines in Running status")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "Running Running Running", ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "Machine", "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machine-role=infra", "-o=jsonpath={.items[*].status.phase}"}).check(oc)
e2e.Logf("Patch infra machinepool .spec.replicas to 2")
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"MachinePool", cdName + "-infra", "-n", oc.Namespace(), "--type", "merge", "-p", `{"spec":{"replicas": 2}}`}).check(oc)
machinesetsname = getResource(oc, asAdmin, withoutNamespace, "MachinePool", cdName+"-infra", "-n", oc.Namespace(), "-o=jsonpath={.status.machineSets[?(@.replicas==1)].name}")
o.Expect(machinesetsname).NotTo(o.BeEmpty())
e2e.Logf("Remote cluster machineset list: %s", machinesetsname)
e2e.Logf("Check machineset %s created on remote cluster", machinesetsname)
newCheck("expect", "get", asAdmin, withoutNamespace, compare, machinesetsname, ok, 5*DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "MachineSet", "-n", "openshift-machine-api", "-l", "hive.openshift.io/machine-pool=infra", "-o=jsonpath={.items[?(@.spec.replicas==1)].metadata.name}"}).check(oc)
e2e.Logf("Check machinesets scale down to 2")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "1 1", ok, 5*DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "MachineSet", "-n", "openshift-machine-api", "-l", "hive.openshift.io/machine-pool=infra", "-o=jsonpath={.items[?(@.spec.replicas==1)].status.availableReplicas}"}).check(oc)
e2e.Logf("Check 2 machines in Running status")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "Running Running", ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "Machine", "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machine-role=infra", "-o=jsonpath={.items[*].status.phase}"}).check(oc)
}) | |||||
test | openshift/openshift-tests-private | 662d9ddf-699e-4347-a580-ff8d1c159a7c | hive_gcp | import (
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
compute "cloud.google.com/go/compute/apiv1"
"cloud.google.com/go/compute/apiv1/computepb"
"google.golang.org/api/iterator"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/openshift/openshift-tests-private/test/extended/util/architecture"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_gcp.go | package hive
import (
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
compute "cloud.google.com/go/compute/apiv1"
"cloud.google.com/go/compute/apiv1/computepb"
"google.golang.org/api/iterator"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/openshift/openshift-tests-private/test/extended/util/architecture"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
//
// Hive test case suite for GCP
//
var _ = g.Describe("[sig-hive] Cluster_Operator hive should", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("hive", exutil.KubeConfigPath())
ns hiveNameSpace
og operatorGroup
sub subscription
hc hiveconfig
testDataDir string
testOCPImage string
region string
basedomain string
)
g.BeforeEach(func() {
// Skip ARM64 arch
architecture.SkipNonAmd64SingleArch(oc)
// Skip if running on a non-GCP platform
exutil.SkipIfPlatformTypeNot(oc, "gcp")
// Install Hive operator if not
testDataDir = exutil.FixturePath("testdata", "cluster_operator/hive")
_, _ = installHiveOperator(oc, &ns, &og, &sub, &hc, testDataDir)
// Get OCP Image for Hive testing
testOCPImage = getTestOCPImage()
// Get platform configurations
region = getRegion(oc)
basedomain = getBasedomain(oc)
})
// Author: [email protected]
// Timeout: 60min
g.It("NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:fxie-Critical-68240-Enable UEFISecureBoot for day 2 VMs on GCP [Serial]", func() {
var (
testCaseID = "68240"
cdName = "cd-" + testCaseID + "-" + getRandomString()[:ClusterSuffixLen]
cdTemplate = filepath.Join(testDataDir, "clusterdeployment-gcp.yaml")
icName = cdName + "-install-config"
icTemplate = filepath.Join(testDataDir, "gcp-install-config.yaml")
imageSetName = cdName + "-imageset"
mpTemplate = filepath.Join(testDataDir, "machinepool-infra-gcp.yaml")
)
var (
// Count the number of VMs in a project, after filtering with the passed-in filter
countVMs = func(client *compute.InstancesClient, projectID, filter string) (vmCount int) {
instancesIterator := client.AggregatedList(context.Background(), &computepb.AggregatedListInstancesRequest{
Filter: &filter,
Project: projectID,
})
for {
resp, err := instancesIterator.Next()
if err == iterator.Done {
break
}
o.Expect(err).NotTo(o.HaveOccurred())
vmCount += len(resp.Value.Instances)
}
e2e.Logf("Found VM count = %v", vmCount)
return vmCount
}
)
exutil.By("Getting project ID from the Hive cd")
projectID, err := exutil.GetGcpProjectID(oc)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(projectID).NotTo(o.BeEmpty())
e2e.Logf("Found project ID = %v", projectID)
exutil.By("Creating a spoke cluster with shielded VM enabled")
installConfigSecret := gcpInstallConfig{
name1: icName,
namespace: oc.Namespace(),
baseDomain: basedomain,
name2: cdName,
region: region,
projectid: projectID,
template: icTemplate,
secureBoot: "Enabled",
}
cd := gcpClusterDeployment{
fake: "false",
name: cdName,
namespace: oc.Namespace(),
baseDomain: basedomain,
clusterName: cdName,
platformType: "gcp",
credRef: GCPCreds,
region: region,
imageSetRef: imageSetName,
installConfigSecret: icName,
pullSecretRef: PullSecret,
installAttemptsLimit: 1,
template: cdTemplate,
}
defer cleanCD(oc, imageSetName, oc.Namespace(), icName, cdName)
createCD(testDataDir, testOCPImage, oc, oc.Namespace(), installConfigSecret, cd)
exutil.By("Waiting for the CD to be installed")
newCheck("expect", "get", asAdmin, requireNS, compare, "true", ok,
ClusterInstallTimeout, []string{"ClusterDeployment", cdName, "-o=jsonpath={.spec.installed}"}).check(oc)
// The Google cloud SDK must be able to locate Application Default Credentials (ADC).
// To this end, we should point the GOOGLE_APPLICATION_CREDENTIALS environment
// variable to a Google cloud credential file.
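// A minimal sketch of doing that in-process (hypothetical path; the CI environment normally exports it already):
// os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", "/path/to/gcp-service-account.json")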
instancesClient, err := compute.NewInstancesRESTClient(context.Background())
o.Expect(err).NotTo(o.HaveOccurred())
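// A default install brings up 3 control-plane and 3 worker VMs, so 6 instances are expected to match the secure-boot filter (assumes the template's default replica counts).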
filter := fmt.Sprintf("(name=%s*) AND (shieldedInstanceConfig.enableSecureBoot = true)", cdName)
o.Expect(countVMs(instancesClient, projectID, filter)).To(o.Equal(6))
exutil.By("Create an infra MachinePool with secureboot enabled")
inframp := machinepool{
namespace: oc.Namespace(),
clusterName: cdName,
template: mpTemplate,
gcpSecureBoot: "Enabled",
}
// The inframp will be deprovisioned along with the CD, so no need to defer a deletion here.
inframp.create(oc)
exutil.By("Make sure all infraVMs have secureboot enabled")
infraId := getInfraIDFromCDName(oc, cdName)
filterInfra := fmt.Sprintf("(name=%s*) AND (shieldedInstanceConfig.enableSecureBoot = true)", infraId+"-infra")
o.Eventually(func() bool {
return countVMs(instancesClient, projectID, filterInfra) == 1
}).WithTimeout(15 * time.Minute).WithPolling(30 * time.Second).Should(o.BeTrue())
})
//author: [email protected]
//default duration is 15m for extended-platform-tests and 35m for Jenkins jobs, so the timeout needs to be raised for ClusterPool and ClusterDeployment cases
//example: ./bin/extended-platform-tests run all --dry-run|grep "41777"|./bin/extended-platform-tests run --timeout 60m -f -
g.It("NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:lwan-High-41777-High-28636-Hive API support for GCP[Serial]", func() {
testCaseID := "41777"
cdName := "cluster-" + testCaseID + "-" + getRandomString()[:ClusterSuffixLen]
oc.SetupProject()
exutil.By("Config GCP Install-Config Secret...")
projectID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure/cluster", "-o=jsonpath={.status.platformStatus.gcp.projectID}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(projectID).NotTo(o.BeEmpty())
installConfigSecret := gcpInstallConfig{
name1: cdName + "-install-config",
namespace: oc.Namespace(),
baseDomain: GCPBaseDomain,
name2: cdName,
region: GCPRegion,
projectid: projectID,
template: filepath.Join(testDataDir, "gcp-install-config.yaml"),
}
exutil.By("Config GCP ClusterDeployment...")
cluster := gcpClusterDeployment{
fake: "false",
name: cdName,
namespace: oc.Namespace(),
baseDomain: GCPBaseDomain,
clusterName: cdName,
platformType: "gcp",
credRef: GCPCreds,
region: GCPRegion,
imageSetRef: cdName + "-imageset",
installConfigSecret: cdName + "-install-config",
pullSecretRef: PullSecret,
installAttemptsLimit: 3,
template: filepath.Join(testDataDir, "clusterdeployment-gcp.yaml"),
}
defer cleanCD(oc, cluster.name+"-imageset", oc.Namespace(), installConfigSecret.name1, cluster.name)
createCD(testDataDir, testOCPImage, oc, oc.Namespace(), installConfigSecret, cluster)
exutil.By("Create worker and infra MachinePool ...")
workermachinepoolGCPTemp := filepath.Join(testDataDir, "machinepool-worker-gcp.yaml")
inframachinepoolGCPTemp := filepath.Join(testDataDir, "machinepool-infra-gcp.yaml")
workermp := machinepool{
namespace: oc.Namespace(),
clusterName: cdName,
template: workermachinepoolGCPTemp,
}
inframp := machinepool{
namespace: oc.Namespace(),
clusterName: cdName,
template: inframachinepoolGCPTemp,
}
defer cleanupObjects(oc,
objectTableRef{"MachinePool", oc.Namespace(), cdName + "-worker"},
objectTableRef{"MachinePool", oc.Namespace(), cdName + "-infra"},
)
workermp.create(oc)
inframp.create(oc)
exutil.By("Check GCP ClusterDeployment installed flag is true")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "true", ok, ClusterInstallTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.spec.installed}"}).check(oc)
exutil.By("OCP-28636: Hive supports remote Machine Set Management for GCP")
tmpDir := "/tmp/" + cdName + "-" + getRandomString()
err = os.MkdirAll(tmpDir, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
defer os.RemoveAll(tmpDir)
getClusterKubeconfig(oc, cdName, oc.Namespace(), tmpDir)
kubeconfig := tmpDir + "/kubeconfig"
e2e.Logf("Check worker machinepool .status.replicas = 3")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "3", ok, DefaultTimeout, []string{"MachinePool", cdName + "-worker", "-n", oc.Namespace(), "-o=jsonpath={.status.replicas}"}).check(oc)
e2e.Logf("Check infra machinepool .status.replicas = 1 ")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "1", ok, DefaultTimeout, []string{"MachinePool", cdName + "-infra", "-n", oc.Namespace(), "-o=jsonpath={.status.replicas}"}).check(oc)
machinesetsname := getResource(oc, asAdmin, withoutNamespace, "MachinePool", cdName+"-infra", "-n", oc.Namespace(), "-o=jsonpath={.status.machineSets[?(@.replicas==1)].name}")
o.Expect(machinesetsname).NotTo(o.BeEmpty())
e2e.Logf("Remote cluster machineset list: %s", machinesetsname)
e2e.Logf("Check machineset %s created on remote cluster", machinesetsname)
newCheck("expect", "get", asAdmin, withoutNamespace, compare, machinesetsname, ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "MachineSet", "-n", "openshift-machine-api", "-l", "hive.openshift.io/machine-pool=infra", "-o=jsonpath={.items[?(@.spec.replicas==1)].metadata.name}"}).check(oc)
e2e.Logf("Check only 1 machineset up")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "1", ok, 5*DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "MachineSet", "-n", "openshift-machine-api", "-l", "hive.openshift.io/machine-pool=infra", "-o=jsonpath={.items[?(@.spec.replicas==1)].status.availableReplicas}"}).check(oc)
e2e.Logf("Check only one machines in Running status")
// Can't filter by infra label because of bug https://issues.redhat.com/browse/HIVE-1922
//newCheck("expect", "get", asAdmin, withoutNamespace, compare, "Running", ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "Machine", "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machine-role=infra", "-o=jsonpath={.items[*].status.phase}"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "Running", ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "Machine", "-n", "openshift-machine-api", "-o=jsonpath={.items[?(@.spec.metadata.labels.node-role\\.kubernetes\\.io==\"infra\")].status.phase}"}).check(oc)
e2e.Logf("Patch infra machinepool .spec.replicas to 3")
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"MachinePool", cdName + "-infra", "-n", oc.Namespace(), "--type", "merge", "-p", `{"spec":{"replicas": 3}}`}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "3", ok, 5*DefaultTimeout, []string{"MachinePool", cdName + "-infra", "-n", oc.Namespace(), "-o=jsonpath={.status.replicas}"}).check(oc)
machinesetsname = getResource(oc, asAdmin, withoutNamespace, "MachinePool", cdName+"-infra", "-n", oc.Namespace(), "-o=jsonpath={.status.machineSets[?(@.replicas==1)].name}")
o.Expect(machinesetsname).NotTo(o.BeEmpty())
e2e.Logf("Remote cluster machineset list: %s", machinesetsname)
e2e.Logf("Check machineset %s created on remote cluster", machinesetsname)
machinesetsArray := strings.Fields(machinesetsname)
o.Expect(len(machinesetsArray) == 3).Should(o.BeTrue())
for _, machinesetName := range machinesetsArray {
newCheck("expect", "get", asAdmin, withoutNamespace, contain, machinesetName, ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "MachineSet", "-n", "openshift-machine-api", "-l", "hive.openshift.io/machine-pool=infra", "-o=jsonpath={.items[?(@.spec.replicas==1)].metadata.name}"}).check(oc)
}
e2e.Logf("Check machinesets scale up to 3")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "1 1 1", ok, 5*DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "MachineSet", "-n", "openshift-machine-api", "-l", "hive.openshift.io/machine-pool=infra", "-o=jsonpath={.items[?(@.spec.replicas==1)].status.availableReplicas}"}).check(oc)
e2e.Logf("Check 3 machines in Running status")
// Can't filter by infra label because of bug https://issues.redhat.com/browse/HIVE-1922
//newCheck("expect", "get", asAdmin, withoutNamespace, compare, "Running Running Running", ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "Machine", "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machine-role=infra", "-o=jsonpath={.items[*].status.phase}"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "Running Running Running", ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "Machine", "-n", "openshift-machine-api", "-o=jsonpath={.items[?(@.spec.metadata.labels.node-role\\.kubernetes\\.io==\"infra\")].status.phase}"}).check(oc)
e2e.Logf("Patch infra machinepool .spec.replicas to 2")
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"MachinePool", cdName + "-infra", "-n", oc.Namespace(), "--type", "merge", "-p", `{"spec":{"replicas": 2}}`}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "2", ok, 5*DefaultTimeout, []string{"MachinePool", cdName + "-infra", "-n", oc.Namespace(), "-o=jsonpath={.status.replicas}"}).check(oc)
machinesetsname = getResource(oc, asAdmin, withoutNamespace, "MachinePool", cdName+"-infra", "-n", oc.Namespace(), "-o=jsonpath={.status.machineSets[?(@.replicas==1)].name}")
o.Expect(machinesetsname).NotTo(o.BeEmpty())
e2e.Logf("Remote cluster machineset list: %s", machinesetsname)
e2e.Logf("Check machineset %s created on remote cluster", machinesetsname)
machinesetsArray = strings.Fields(machinesetsname)
o.Expect(len(machinesetsArray) == 2).Should(o.BeTrue())
for _, machinesetName := range machinesetsArray {
newCheck("expect", "get", asAdmin, withoutNamespace, contain, machinesetName, ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "MachineSet", "-n", "openshift-machine-api", "-l", "hive.openshift.io/machine-pool=infra", "-o=jsonpath={.items[?(@.spec.replicas==1)].metadata.name}"}).check(oc)
}
e2e.Logf("Check machinesets scale down to 2")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "1 1", ok, 5*DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "MachineSet", "-n", "openshift-machine-api", "-l", "hive.openshift.io/machine-pool=infra", "-o=jsonpath={.items[?(@.spec.replicas==1)].status.availableReplicas}"}).check(oc)
e2e.Logf("Check 2 machines in Running status")
// Can't filter by infra label because of bug https://issues.redhat.com/browse/HIVE-1922
//newCheck("expect", "get", asAdmin, withoutNamespace, compare, "Running Running", ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "Machine", "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machine-role=infra", "-o=jsonpath={.items[*].status.phase}"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "Running Running", ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "Machine", "-n", "openshift-machine-api", "-o=jsonpath={.items[?(@.spec.metadata.labels.node-role\\.kubernetes\\.io==\"infra\")].status.phase}"}).check(oc)
})
//author: [email protected]
//default duration is 15m for extended-platform-tests and 35m for Jenkins jobs, so the timeout needs to be raised for ClusterPool and ClusterDeployment cases
//example: ./bin/extended-platform-tests run all --dry-run|grep "33872"|./bin/extended-platform-tests run --timeout 60m -f -
g.It("NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:lwan-Medium-33872-[gcp]Hive supports ClusterPool [Serial]", func() {
testCaseID := "33872"
poolName := "pool-" + testCaseID
imageSetName := poolName + "-imageset"
imageSetTemp := filepath.Join(testDataDir, "clusterimageset.yaml")
imageSet := clusterImageSet{
name: imageSetName,
releaseImage: testOCPImage,
template: imageSetTemp,
}
exutil.By("Create ClusterImageSet...")
defer cleanupObjects(oc, objectTableRef{"ClusterImageSet", "", imageSetName})
imageSet.create(oc)
exutil.By("Check if ClusterImageSet was created successfully")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, imageSetName, ok, DefaultTimeout, []string{"ClusterImageSet"}).check(oc)
oc.SetupProject()
//secrets can be accessed by pods in the same namespace, so copy the pull-secret and gcp-credentials into the target namespace for the pool
exutil.By("Copy GCP platform credentials...")
createGCPCreds(oc, oc.Namespace())
exutil.By("Copy pull-secret...")
createPullSecret(oc, oc.Namespace())
exutil.By("Create ClusterPool...")
poolTemp := filepath.Join(testDataDir, "clusterpool-gcp.yaml")
pool := gcpClusterPool{
name: poolName,
namespace: oc.Namespace(),
fake: "false",
baseDomain: GCPBaseDomain,
imageSetRef: imageSetName,
platformType: "gcp",
credRef: GCPCreds,
region: GCPRegion,
pullSecretRef: PullSecret,
size: 1,
maxSize: 1,
runningCount: 0,
maxConcurrent: 1,
hibernateAfter: "360m",
template: poolTemp,
}
defer cleanupObjects(oc, objectTableRef{"ClusterPool", oc.Namespace(), poolName})
pool.create(oc)
exutil.By("Check if GCP ClusterPool created successfully and become ready")
//runningCount is 0 so pool status should be standby: 1, ready: 0
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "1", ok, ClusterInstallTimeout, []string{"ClusterPool", poolName, "-n", oc.Namespace(), "-o=jsonpath={.status.standby}"}).check(oc)
exutil.By("Check if CD is Hibernating")
cdListStr := getCDlistfromPool(oc, poolName)
cdArray := strings.Split(strings.TrimSpace(cdListStr), "\n")
for i := range cdArray {
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "Hibernating", ok, ClusterResumeTimeout, []string{"ClusterDeployment", cdArray[i], "-n", cdArray[i]}).check(oc)
}
exutil.By("Patch pool.spec.lables.test=test...")
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"ClusterPool", poolName, "-n", oc.Namespace(), "--type", "merge", "-p", `{"spec":{"labels":{"test":"test"}}}`}).check(oc)
exutil.By("The existing CD in the pool has no test label")
for i := range cdArray {
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "test", nok, DefaultTimeout, []string{"ClusterDeployment", cdArray[i], "-n", cdArray[i], "-o=jsonpath={.metadata.labels}"}).check(oc)
}
exutil.By("The new CD in the pool should have the test label")
e2e.Logf("Delete the old CD in the pool")
newCheck("expect", "delete", asAdmin, withoutNamespace, contain, "delete", ok, ClusterUninstallTimeout, []string{"ClusterDeployment", cdArray[0], "-n", cdArray[0]}).check(oc)
e2e.Logf("Get the CD list from the pool again.")
cdListStr = getCDlistfromPool(oc, poolName)
cdArray = strings.Split(strings.TrimSpace(cdListStr), "\n")
for i := range cdArray {
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "test", ok, DefaultTimeout, []string{"ClusterDeployment", cdArray[i], "-n", cdArray[i], "-o=jsonpath={.metadata.labels}"}).check(oc)
}
})
//author: [email protected]
//default duration is 15m for extended-platform-tests and 35m for Jenkins jobs, so the timeout needs to be raised for ClusterPool and ClusterDeployment cases
//example: ./bin/extended-platform-tests run all --dry-run|grep "44475"|./bin/extended-platform-tests run --timeout 90m -f -
g.It("NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:liangli-Medium-44475-Medium-45158-[gcp]Hive Change BaseDomain field right after creating pool and all clusters finish install firstly then recreated [Serial]", func() {
testCaseID := "44475"
poolName := "pool-" + testCaseID
imageSetName := poolName + "-imageset"
imageSetTemp := filepath.Join(testDataDir, "clusterimageset.yaml")
imageSet := clusterImageSet{
name: imageSetName,
releaseImage: testOCPImage,
template: imageSetTemp,
}
exutil.By("Create ClusterImageSet...")
defer cleanupObjects(oc, objectTableRef{"ClusterImageSet", "", imageSetName})
imageSet.create(oc)
exutil.By("Check if ClusterImageSet was created successfully")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, imageSetName, ok, DefaultTimeout, []string{"ClusterImageSet"}).check(oc)
oc.SetupProject()
//secrets can be accessed by pods in the same namespace, so copy the pull-secret and gcp-credentials into the target namespace for the clusterdeployment
exutil.By("Copy GCP platform credentials...")
createGCPCreds(oc, oc.Namespace())
exutil.By("Copy pull-secret...")
createPullSecret(oc, oc.Namespace())
exutil.By("Create ClusterPool...")
poolTemp := filepath.Join(testDataDir, "clusterpool-gcp.yaml")
pool := gcpClusterPool{
name: poolName,
namespace: oc.Namespace(),
fake: "false",
baseDomain: GCPBaseDomain,
imageSetRef: imageSetName,
platformType: "gcp",
credRef: GCPCreds,
region: GCPRegion,
pullSecretRef: PullSecret,
size: 1,
maxSize: 1,
runningCount: 0,
maxConcurrent: 1,
hibernateAfter: "360m",
template: poolTemp,
}
defer cleanupObjects(oc, objectTableRef{"ClusterPool", oc.Namespace(), poolName})
pool.create(oc)
e2e.Logf("Check ClusterDeployment in pool created")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, poolName, ok, DefaultTimeout, []string{"ClusterDeployment", "-A", "-o=jsonpath={.items[*].metadata.name}"}).check(oc)
e2e.Logf("get old ClusterDeployment Name")
cdListStr := getCDlistfromPool(oc, poolName)
oldClusterDeploymentName := strings.Split(strings.TrimSpace(cdListStr), "\n")
o.Expect(len(oldClusterDeploymentName) > 0).Should(o.BeTrue())
e2e.Logf("old cd name:" + oldClusterDeploymentName[0])
exutil.By("OCP-45158: Check Provisioned condition")
e2e.Logf("Check ClusterDeployment is provisioning")
expectedResult := "message:Cluster is provisioning,reason:Provisioning,status:False"
jsonPath := "-o=jsonpath={\"message:\"}{.status.conditions[?(@.type==\"Provisioned\")].message}{\",reason:\"}{.status.conditions[?(@.type==\"Provisioned\")].reason}{\",status:\"}{.status.conditions[?(@.type==\"Provisioned\")].status}"
newCheck("expect", "get", asAdmin, withoutNamespace, contain, expectedResult, ok, DefaultTimeout, []string{"ClusterDeployment", oldClusterDeploymentName[0], "-n", oldClusterDeploymentName[0], jsonPath}).check(oc)
e2e.Logf("Check ClusterDeployment Provisioned finish")
expectedResult = "message:Cluster is provisioned,reason:Provisioned,status:True"
newCheck("expect", "get", asAdmin, withoutNamespace, contain, expectedResult, ok, ClusterInstallTimeout, []string{"ClusterDeployment", oldClusterDeploymentName[0], "-n", oldClusterDeploymentName[0], jsonPath}).check(oc)
exutil.By("Check if GCP ClusterPool created successfully and become ready")
//runningCount is 0 so pool status should be standby: 1, ready: 0
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "1", ok, DefaultTimeout, []string{"ClusterPool", poolName, "-n", oc.Namespace(), "-o=jsonpath={.status.standby}"}).check(oc)
exutil.By("test OCP-44475")
e2e.Logf("oc patch ClusterPool 'spec.baseDomain'")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("ClusterPool", poolName, "-n", oc.Namespace(), "-p", `{"spec":{"baseDomain":"`+GCPBaseDomain2+`"}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Check ClusterDeployment is Deprovisioning")
expectedResult = "message:Cluster is deprovisioning,reason:Deprovisioning,status:False"
newCheck("expect", "get", asAdmin, withoutNamespace, contain, expectedResult, ok, DefaultTimeout, []string{"ClusterDeployment", oldClusterDeploymentName[0], "-n", oldClusterDeploymentName[0], jsonPath}).check(oc)
e2e.Logf("Check ClusterDeployment is Deprovisioned")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, oldClusterDeploymentName[0], nok, ClusterUninstallTimeout, []string{"ClusterDeployment", "-A", "-o=jsonpath={.items[*].metadata.name}"}).check(oc)
e2e.Logf("Check if ClusterPool re-create the CD")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, poolName, ok, DefaultTimeout, []string{"ClusterDeployment", "-A", "-o=jsonpath={.items[*].metadata.name}"}).check(oc)
e2e.Logf("get new ClusterDeployment name")
cdListStr = getCDlistfromPool(oc, poolName)
newClusterDeploymentName := strings.Split(strings.TrimSpace(cdListStr), "\n")
o.Expect(len(newClusterDeploymentName) > 0).Should(o.BeTrue())
e2e.Logf("new cd name:" + newClusterDeploymentName[0])
newCheck("expect", "get", asAdmin, withoutNamespace, contain, GCPBaseDomain2, ok, DefaultTimeout, []string{"ClusterDeployment", newClusterDeploymentName[0], "-n", newClusterDeploymentName[0], "-o=jsonpath={.spec.baseDomain}"}).check(oc)
o.Expect(strings.Compare(oldClusterDeploymentName[0], newClusterDeploymentName[0]) != 0).Should(o.BeTrue())
})
//author: [email protected]
//default duration is 15m for extended-platform-tests and 35m for Jenkins jobs, so the timeout needs to be raised for ClusterPool and ClusterDeployment cases
//example: ./bin/extended-platform-tests run all --dry-run|grep "41499"|./bin/extended-platform-tests run --timeout 60m -f -
g.It("NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:lwan-High-41499-High-34404-High-25333-Hive syncset test for paused and multi-modes[Serial]", func() {
testCaseID := "41499"
cdName := "cluster-" + testCaseID + "-" + getRandomString()[:ClusterSuffixLen]
oc.SetupProject()
exutil.By("Config GCP Install-Config Secret...")
projectID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure/cluster", "-o=jsonpath={.status.platformStatus.gcp.projectID}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(projectID).NotTo(o.BeEmpty())
installConfigSecret := gcpInstallConfig{
name1: cdName + "-install-config",
namespace: oc.Namespace(),
baseDomain: GCPBaseDomain,
name2: cdName,
region: GCPRegion,
projectid: projectID,
template: filepath.Join(testDataDir, "gcp-install-config.yaml"),
}
exutil.By("Config GCP ClusterDeployment...")
cluster := gcpClusterDeployment{
fake: "false",
name: cdName,
namespace: oc.Namespace(),
baseDomain: GCPBaseDomain,
clusterName: cdName,
platformType: "gcp",
credRef: GCPCreds,
region: GCPRegion,
imageSetRef: cdName + "-imageset",
installConfigSecret: cdName + "-install-config",
pullSecretRef: PullSecret,
installAttemptsLimit: 3,
template: filepath.Join(testDataDir, "clusterdeployment-gcp.yaml"),
}
defer cleanCD(oc, cluster.name+"-imageset", oc.Namespace(), installConfigSecret.name1, cluster.name)
createCD(testDataDir, testOCPImage, oc, oc.Namespace(), installConfigSecret, cluster)
exutil.By("Check GCP ClusterDeployment installed flag is true")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "true", ok, ClusterInstallTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.spec.installed}"}).check(oc)
tmpDir := "/tmp/" + cdName + "-" + getRandomString()
err = os.MkdirAll(tmpDir, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
defer os.RemoveAll(tmpDir)
getClusterKubeconfig(oc, cdName, oc.Namespace(), tmpDir)
kubeconfig := tmpDir + "/kubeconfig"
exutil.By("OCP-41499: Add condition in ClusterDeployment status for paused syncset")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, cdName, ok, DefaultTimeout, []string{"ClusterSync", cdName, "-n", oc.Namespace()}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "False", ok, DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.status.conditions[?(@.type==\"SyncSetFailed\")].status}"}).check(oc)
e2e.Logf("Add \"hive.openshift.io/syncset-pause\" annotation in ClusterDeployment, and delete ClusterSync CR")
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "--type", "merge", "-p", `{"metadata": {"annotations": {"hive.openshift.io/syncset-pause": "true"}}}`}).check(oc)
newCheck("expect", "delete", asAdmin, withoutNamespace, contain, "delete", ok, DefaultTimeout, []string{"ClusterSync", cdName, "-n", oc.Namespace()}).check(oc)
e2e.Logf("Check ClusterDeployment condition=SyncSetFailed")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "True", ok, DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.status.conditions[?(@.type==\"SyncSetFailed\")].status}"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "SyncSetPaused", ok, DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.status.conditions[?(@.type==\"SyncSetFailed\")].reason}"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "SyncSet is paused. ClusterSync will not be created", ok, DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.status.conditions[?(@.type==\"SyncSetFailed\")].message}"}).check(oc)
e2e.Logf("Check ClusterSync won't be created.")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, cdName, nok, DefaultTimeout, []string{"ClusterSync", "-n", oc.Namespace()}).check(oc)
e2e.Logf("Remove annotation, check ClusterSync will be created again.")
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "--type", "merge", "-p", `{"metadata": {"annotations": {"hive.openshift.io/syncset-pause": "false"}}}`}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "False", ok, DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.status.conditions[?(@.type==\"SyncSetFailed\")].status}"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, cdName, ok, DefaultTimeout, []string{"ClusterSync", cdName, "-n", oc.Namespace()}).check(oc)
exutil.By("OCP-34404: Hive adds muti-modes for syncset to handle applying resources too large")
e2e.Logf("Create SyncSet with default applyBehavior.")
syncSetName := testCaseID + "-syncset-1"
configMapName := testCaseID + "-configmap-1"
configMapNamespace := testCaseID + "-" + getRandomString() + "-hive-1"
resourceMode := "Sync"
syncTemp := filepath.Join(testDataDir, "syncset-resource.yaml")
syncResource := syncSetResource{
name: syncSetName,
namespace: oc.Namespace(),
namespace2: configMapNamespace,
cdrefname: cdName,
cmname: configMapName,
cmnamespace: configMapNamespace,
ramode: resourceMode,
template: syncTemp,
}
defer cleanupObjects(oc, objectTableRef{"SyncSet", oc.Namespace(), syncSetName})
syncResource.create(oc)
e2e.Logf("Check ConfigMap is created on target cluster and have a last-applied-config annotation.")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, `{"foo":"bar"}`, ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "ConfigMap", configMapName, "-n", configMapNamespace, "-o=jsonpath={.data}"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "kubectl.kubernetes.io/last-applied-configuration", ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "ConfigMap", configMapName, "-n", configMapNamespace, "-o=jsonpath={.metadata.annotations}"}).check(oc)
e2e.Logf("Patch syncset resource.")
patchYaml := `
spec:
resources:
- apiVersion: v1
kind: Namespace
metadata:
name: ` + configMapNamespace + `
- apiVersion: v1
data:
foo1: bar1
kind: ConfigMap
metadata:
name: ` + configMapName + `
namespace: ` + configMapNamespace
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"SyncSet", syncSetName, "-n", oc.Namespace(), "--type", "merge", "-p", patchYaml}).check(oc)
e2e.Logf("Check data field in ConfigMap on target cluster should update.")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, `{"foo1":"bar1"}`, ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "ConfigMap", configMapName, "-n", configMapNamespace, "-o=jsonpath={.data}"}).check(oc)
e2e.Logf("Create SyncSet with applyBehavior=CreateOnly.")
syncSetName2 := testCaseID + "-syncset-2"
configMapName2 := testCaseID + "-configmap-2"
configMapNamespace2 := testCaseID + "-" + getRandomString() + "-hive-2"
applyBehavior := "CreateOnly"
syncTemp2 := filepath.Join(testDataDir, "syncset-resource.yaml")
syncResource2 := syncSetResource{
name: syncSetName2,
namespace: oc.Namespace(),
namespace2: configMapNamespace2,
cdrefname: cdName,
cmname: configMapName2,
cmnamespace: configMapNamespace2,
ramode: resourceMode,
applybehavior: applyBehavior,
template: syncTemp2,
}
defer cleanupObjects(oc, objectTableRef{"SyncSet", oc.Namespace(), syncSetName2})
syncResource2.create(oc)
e2e.Logf("Check ConfigMap is created on target cluster and should not have the last-applied-config annotation.")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, `{"foo":"bar"}`, ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "ConfigMap", configMapName2, "-n", configMapNamespace2, "-o=jsonpath={.data}"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "kubectl.kubernetes.io/last-applied-configuration", nok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "ConfigMap", configMapName2, "-n", configMapNamespace2, "-o=jsonpath={.metadata.annotations}"}).check(oc)
e2e.Logf("Patch syncset resource.")
patchYaml = `
spec:
resources:
- apiVersion: v1
kind: Namespace
metadata:
name: ` + configMapNamespace2 + `
- apiVersion: v1
data:
foo1: bar1
kind: ConfigMap
metadata:
name: ` + configMapName2 + `
namespace: ` + configMapNamespace2
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"SyncSet", syncSetName2, "-n", oc.Namespace(), "--type", "merge", "-p", patchYaml}).check(oc)
e2e.Logf("Check data field in ConfigMap on target cluster should not update.")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, `{"foo":"bar"}`, ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "ConfigMap", configMapName2, "-n", configMapNamespace2, "-o=jsonpath={.data}"}).check(oc)
e2e.Logf("Create SyncSet with applyBehavior=CreateOrUpdate.")
syncSetName3 := testCaseID + "-syncset-3"
configMapName3 := testCaseID + "-configmap-3"
configMapNamespace3 := testCaseID + "-" + getRandomString() + "-hive-3"
applyBehavior = "CreateOrUpdate"
syncTemp3 := filepath.Join(testDataDir, "syncset-resource.yaml")
syncResource3 := syncSetResource{
name: syncSetName3,
namespace: oc.Namespace(),
namespace2: configMapNamespace3,
cdrefname: cdName,
cmname: configMapName3,
cmnamespace: configMapNamespace3,
ramode: resourceMode,
applybehavior: applyBehavior,
template: syncTemp3,
}
defer cleanupObjects(oc, objectTableRef{"SyncSet", oc.Namespace(), syncSetName3})
syncResource3.create(oc)
e2e.Logf("Check ConfigMap is created on target cluster and should not have the last-applied-config annotation.")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, `{"foo":"bar"}`, ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "ConfigMap", configMapName3, "-n", configMapNamespace3, "-o=jsonpath={.data}"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "kubectl.kubernetes.io/last-applied-configuration", nok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "ConfigMap", configMapName3, "-n", configMapNamespace3, "-o=jsonpath={.metadata.annotations}"}).check(oc)
e2e.Logf("Patch syncset resource.")
patchYaml = `
spec:
resources:
- apiVersion: v1
kind: Namespace
metadata:
name: ` + configMapNamespace3 + `
- apiVersion: v1
data:
foo2: bar2
kind: ConfigMap
metadata:
name: ` + configMapName3 + `
namespace: ` + configMapNamespace3
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"SyncSet", syncSetName3, "-n", oc.Namespace(), "--type", "merge", "-p", patchYaml}).check(oc)
e2e.Logf("Check data field in ConfigMap on target cluster should update and contain both foo and foo2.")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, `{"foo":"bar","foo2":"bar2"}`, ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "ConfigMap", configMapName3, "-n", configMapNamespace3, "-o=jsonpath={.data}"}).check(oc)
e2e.Logf("Patch syncset resource.")
patchYaml = `
spec:
resources:
- apiVersion: v1
kind: Namespace
metadata:
name: ` + configMapNamespace3 + `
- apiVersion: v1
data:
foo: bar-test
foo3: bar3
kind: ConfigMap
metadata:
name: ` + configMapName3 + `
namespace: ` + configMapNamespace3
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"SyncSet", syncSetName3, "-n", oc.Namespace(), "--type", "merge", "-p", patchYaml}).check(oc)
e2e.Logf("Check data field in ConfigMap on target cluster should update, patch foo and add foo3.")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, `{"foo":"bar-test","foo2":"bar2","foo3":"bar3"}`, ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "ConfigMap", configMapName3, "-n", configMapNamespace3, "-o=jsonpath={.data}"}).check(oc)
exutil.By("OCP-25333: Changing apiGroup for ClusterRoleBinding in SyncSet doesn't delete the CRB")
e2e.Logf("Create SyncSet with invalid apiGroup in resource CR.")
syncSetName4 := testCaseID + "-syncset-4"
syncsetYaml := `
apiVersion: hive.openshift.io/v1
kind: SyncSet
metadata:
name: ` + syncSetName4 + `
spec:
clusterDeploymentRefs:
- name: ` + cdName + `
- namespace: ` + oc.Namespace() + `
resourceApplyMode: Sync
resources:
- apiVersion: authorization.openshift.io/v1
kind: ClusterRoleBinding
metadata:
name: dedicated-admins-cluster
subjects:
- kind: Group
name: dedicated-admins
- kind: Group
name: system:serviceaccounts:dedicated-admin
roleRef:
name: dedicated-admins-cluster`
var filename = testCaseID + "-syncset-crb.yaml"
err = ioutil.WriteFile(filename, []byte(syncsetYaml), 0644)
defer os.Remove(filename)
o.Expect(err).NotTo(o.HaveOccurred())
output, err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", filename, "-n", oc.Namespace()).Output()
o.Expect(err).To(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring(`Invalid value: "authorization.openshift.io/v1": must use kubernetes group for this resource kind`))
e2e.Logf("oc create syncset failed, this is expected.")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, syncSetName4, nok, DefaultTimeout, []string{"SyncSet", "-n", oc.Namespace()}).check(oc)
})
//author: [email protected]
//The discardLocalSsdOnHibernate field covered by OCP-78499 is supported starting from version 4.19.
g.It("Author:mihuang-NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Medium-35069-High-78499-Hive supports cluster hibernation for gcp[Serial]", func() {
testCaseID := "35069"
cdName := "cluster-" + testCaseID + "-" + getRandomString()[:ClusterSuffixLen]
oc.SetupProject()
exutil.By("Config GCP Install-Config Secret...")
projectID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure/cluster", "-o=jsonpath={.status.platformStatus.gcp.projectID}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(projectID).NotTo(o.BeEmpty())
installConfigSecret := gcpInstallConfig{
name1: cdName + "-install-config",
namespace: oc.Namespace(),
baseDomain: GCPBaseDomain,
name2: cdName,
region: GCPRegion,
projectid: projectID,
template: filepath.Join(testDataDir, "gcp-install-config.yaml"),
}
exutil.By("Config GCP ClusterDeployment...")
cluster := gcpClusterDeployment{
fake: "false",
name: cdName,
namespace: oc.Namespace(),
baseDomain: GCPBaseDomain,
clusterName: cdName,
platformType: "gcp",
credRef: GCPCreds,
region: GCPRegion,
imageSetRef: cdName + "-imageset",
installConfigSecret: cdName + "-install-config",
pullSecretRef: PullSecret,
installAttemptsLimit: 3,
template: filepath.Join(testDataDir, "clusterdeployment-gcp.yaml"),
}
defer cleanCD(oc, cluster.name+"-imageset", oc.Namespace(), installConfigSecret.name1, cluster.name)
createCD(testDataDir, testOCPImage, oc, oc.Namespace(), installConfigSecret, cluster)
exutil.By("Check GCP ClusterDeployment installed flag is true")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "true", ok, ClusterInstallTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.spec.installed}"}).check(oc)
exutil.By("OCP-78499: Verify whether the discardLocalSsdOnHibernate field exists")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "false", ok, DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.spec.platform.gcp.discardLocalSsdOnHibernate}"}).check(oc)
exutil.By("Check CD has Hibernating condition")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "False", ok, DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), `-o=jsonpath={.status.conditions[?(@.type=="Hibernating")].status}`}).check(oc)
exutil.By("patch the CD to Hibernating...")
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "--type", "merge", "-p", `{"spec":{"powerState": "Hibernating"}}`}).check(oc)
e2e.Logf("OCP-78499: Wait until the CD successfully reaches the Hibernating state.")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "Hibernating", ok, ClusterResumeTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.spec.powerState}"}).check(oc)
e2e.Logf("Check cd's condition")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "True", ok, ClusterResumeTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), `-o=jsonpath={.status.conditions[?(@.type=="Hibernating")].status}`}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "False", ok, ClusterResumeTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), `-o=jsonpath={.status.conditions[?(@.type=="Ready")].status}`}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "True", ok, DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), `-o=jsonpath={.status.conditions[?(@.type=="Unreachable")].status}`}).check(oc)
exutil.By("patch the CD to Running...")
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "--type", "merge", "-p", `{"spec":{"powerState": "Running"}}`}).check(oc)
e2e.Logf("Wait for CD to be Running")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "Running", ok, ClusterResumeTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.spec.powerState}"}).check(oc)
e2e.Logf("Check cd's condition")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "False", ok, ClusterResumeTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), `-o=jsonpath={.status.conditions[?(@.type=="Hibernating")].status}`}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "True", ok, ClusterResumeTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), `-o=jsonpath={.status.conditions[?(@.type=="Ready")].status}`}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "False", ok, DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), `-o=jsonpath={.status.conditions[?(@.type=="Unreachable")].status}`}).check(oc)
})
//author: [email protected]
//default duration is 15m for extended-platform-tests and 35m for jenkins job, need to reset for ClusterPool and ClusterDeployment cases
//example: ./bin/extended-platform-tests run all --dry-run|grep "52411"|./bin/extended-platform-tests run --timeout 60m -f -
g.It("NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:lwan-Medium-52411-[GCP]Hive Machinepool test for autoscale [Serial]", func() {
testCaseID := "52411"
cdName := "cluster-" + testCaseID + "-" + getRandomString()[:ClusterSuffixLen]
oc.SetupProject()
exutil.By("Config GCP Install-Config Secret...")
projectID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure/cluster", "-o=jsonpath={.status.platformStatus.gcp.projectID}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(projectID).NotTo(o.BeEmpty())
installConfigSecret := gcpInstallConfig{
name1: cdName + "-install-config",
namespace: oc.Namespace(),
baseDomain: GCPBaseDomain,
name2: cdName,
region: GCPRegion,
projectid: projectID,
template: filepath.Join(testDataDir, "gcp-install-config.yaml"),
}
exutil.By("Config GCP ClusterDeployment...")
cluster := gcpClusterDeployment{
fake: "false",
name: cdName,
namespace: oc.Namespace(),
baseDomain: GCPBaseDomain,
clusterName: cdName,
platformType: "gcp",
credRef: GCPCreds,
region: GCPRegion,
imageSetRef: cdName + "-imageset",
installConfigSecret: cdName + "-install-config",
pullSecretRef: PullSecret,
installAttemptsLimit: 3,
template: filepath.Join(testDataDir, "clusterdeployment-gcp.yaml"),
}
defer cleanCD(oc, cluster.name+"-imageset", oc.Namespace(), installConfigSecret.name1, cluster.name)
createCD(testDataDir, testOCPImage, oc, oc.Namespace(), installConfigSecret, cluster)
exutil.By("Create infra MachinePool ...")
inframachinepoolGCPTemp := filepath.Join(testDataDir, "machinepool-infra-gcp.yaml")
inframp := machinepool{
namespace: oc.Namespace(),
clusterName: cdName,
template: inframachinepoolGCPTemp,
}
defer cleanupObjects(oc, objectTableRef{"MachinePool", oc.Namespace(), cdName + "-infra"})
inframp.create(oc)
exutil.By("Check if ClusterDeployment created successfully and become Provisioned")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "true", ok, ClusterInstallTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.spec.installed}"}).check(oc)
tmpDir := "/tmp/" + cdName + "-" + getRandomString()
err = os.MkdirAll(tmpDir, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
defer os.RemoveAll(tmpDir)
getClusterKubeconfig(oc, cdName, oc.Namespace(), tmpDir)
kubeconfig := tmpDir + "/kubeconfig"
e2e.Logf("Patch static replicas to autoscaler")
exutil.By("OCP-52411: [GCP]Allow minReplicas autoscaling of MachinePools to be 0")
e2e.Logf("Check hive allow set minReplicas=0 without zone setting")
autoScalingMax := "4"
autoScalingMin := "0"
removeConfig := "[{\"op\": \"remove\", \"path\": \"/spec/replicas\"}]"
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"MachinePool", cdName + "-infra", "-n", oc.Namespace(), "--type", "json", "-p", removeConfig}).check(oc)
autoscalConfig := fmt.Sprintf("{\"spec\": {\"autoscaling\": {\"maxReplicas\": %s, \"minReplicas\": %s}}}", autoScalingMax, autoScalingMin)
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"MachinePool", cdName + "-infra", "-n", oc.Namespace(), "--type", "merge", "-p", autoscalConfig}).check(oc)
e2e.Logf("Check replicas is 0")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "0 0 0 0", ok, 2*DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "MachineSet", "-n", "openshift-machine-api", "-l", "hive.openshift.io/machine-pool=infra", "-o=jsonpath={.items[*].status.replicas}"}).check(oc)
e2e.Logf("Check hive allow set minReplicas=0 within zone setting")
cleanupObjects(oc, objectTableRef{"MachinePool", oc.Namespace(), cdName + "-infra"})
infra2MachinepoolYaml := `
apiVersion: hive.openshift.io/v1
kind: MachinePool
metadata:
name: ` + cdName + `-infra2
namespace: ` + oc.Namespace() + `
spec:
autoscaling:
maxReplicas: 4
minReplicas: 0
clusterDeploymentRef:
name: ` + cdName + `
labels:
node-role.kubernetes.io: infra2
node-role.kubernetes.io/infra2: ""
name: infra2
platform:
gcp:
osDisk: {}
type: n1-standard-4
zones:
- ` + GCPRegion + `-a
- ` + GCPRegion + `-b
- ` + GCPRegion + `-c
- ` + GCPRegion + `-f`
var filename = testCaseID + "-machinepool-infra2.yaml"
err = ioutil.WriteFile(filename, []byte(infra2MachinepoolYaml), 0644)
defer os.Remove(filename)
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f", filename, "--ignore-not-found").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", filename).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Check replicas is 0")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "0 0 0 0", ok, 2*DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "MachineSet", "-n", "openshift-machine-api", "-l", "hive.openshift.io/machine-pool=infra2", "-o=jsonpath={.items[*].status.replicas}"}).check(oc)
exutil.By("Check Hive supports autoscale for GCP")
patchYaml := `
spec:
scaleDown:
enabled: true
delayAfterAdd: 10s
delayAfterDelete: 10s
delayAfterFailure: 10s
unneededTime: 10s`
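// The patch above is assumed to shorten the ClusterAutoscaler's scale-down delays (the defaults are on
// the order of minutes) so that the scale-down triggered later in this test completes quickly.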
e2e.Logf("Add busybox in remote cluster and check machines will scale up to maxReplicas")
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "ClusterAutoscaler", "default", "--type", "merge", "-p", patchYaml}).check(oc)
workloadYaml := filepath.Join(testDataDir, "workload.yaml")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("--kubeconfig="+kubeconfig, "-f", workloadYaml, "--ignore-not-found").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("--kubeconfig="+kubeconfig, "-f", workloadYaml).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "busybox", ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "Deployment", "busybox", "-n", "default"}).check(oc)
e2e.Logf("Check replicas will scale up to maximum value")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "1 1 1 1", ok, 5*DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "MachineSet", "-n", "openshift-machine-api", "-l", "hive.openshift.io/machine-pool=infra2", "-o=jsonpath={.items[*].status.replicas}"}).check(oc)
e2e.Logf("Delete busybox in remote cluster and check machines will scale down to minReplicas %s", autoScalingMin)
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("--kubeconfig="+kubeconfig, "-f", workloadYaml).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Check replicas will scale down to minimum value")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "0 0 0 0", ok, 5*DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "MachineSet", "-n", "openshift-machine-api", "-l", "hive.openshift.io/machine-pool=infra2", "-o=jsonpath={.items[*].status.replicas}"}).check(oc)
})
//author: [email protected]
//default duration is 15m for extended-platform-tests and 35m for jenkins job, need to reset for ClusterPool and ClusterDeployment cases
//example: ./bin/extended-platform-tests run all --dry-run|grep "46729"|./bin/extended-platform-tests run --timeout 60m -f -
g.It("NonHyperShiftHOST-NonPreRelease-Longduration-ConnectedOnly-Author:lwan-Medium-46729-[HIVE]Support overriding installer image [Serial]", func() {
testCaseID := "46729"
cdName := "cluster-" + testCaseID + "-" + getRandomString()[:ClusterSuffixLen]
imageSetName := cdName + "-imageset"
imageSetTemp := filepath.Join(testDataDir, "clusterimageset.yaml")
imageSet := clusterImageSet{
name: imageSetName,
releaseImage: testOCPImage,
template: imageSetTemp,
}
exutil.By("Create ClusterImageSet...")
defer cleanupObjects(oc, objectTableRef{"ClusterImageSet", "", imageSetName})
imageSet.create(oc)
oc.SetupProject()
//secrets can be accessed by pod in the same namespace, so copy pull-secret and gcp-credentials to target namespace for the clusterdeployment
exutil.By("Copy GCP platform credentials...")
createGCPCreds(oc, oc.Namespace())
exutil.By("Copy pull-secret...")
createPullSecret(oc, oc.Namespace())
exutil.By("Create GCP Install-Config Secret...")
installConfigTemp := filepath.Join(testDataDir, "gcp-install-config.yaml")
installConfigSecretName := cdName + "-install-config"
projectID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure/cluster", "-o=jsonpath={.status.platformStatus.gcp.projectID}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(projectID).NotTo(o.BeEmpty())
installConfigSecret := gcpInstallConfig{
name1: installConfigSecretName,
namespace: oc.Namespace(),
baseDomain: GCPBaseDomain,
name2: cdName,
region: GCPRegion,
projectid: projectID,
template: installConfigTemp,
}
defer cleanupObjects(oc, objectTableRef{"secret", oc.Namespace(), installConfigSecretName})
installConfigSecret.create(oc)
exutil.By("Create GCP ClusterDeployment...")
clusterTemp := filepath.Join(testDataDir, "clusterdeployment-gcp.yaml")
clusterVersion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion/version", "-o=jsonpath={.status.desired.version}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(clusterVersion).NotTo(o.BeEmpty())
installerImageForOverride, err := getPullSpec(oc, "installer", clusterVersion)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(installerImageForOverride).NotTo(o.BeEmpty())
e2e.Logf("ClusterVersion is %s, installerImageForOverride is %s", clusterVersion, installerImageForOverride)
cluster := gcpClusterDeployment{
fake: "false",
name: cdName,
namespace: oc.Namespace(),
baseDomain: GCPBaseDomain,
clusterName: cdName,
platformType: "gcp",
credRef: GCPCreds,
region: GCPRegion,
imageSetRef: imageSetName,
installConfigSecret: installConfigSecretName,
pullSecretRef: PullSecret,
installerImageOverride: installerImageForOverride,
installAttemptsLimit: 3,
template: clusterTemp,
}
defer cleanupObjects(oc, objectTableRef{"ClusterDeployment", oc.Namespace(), cdName})
cluster.create(oc)
exutil.By("Check installer image is overrided via \"installerImageOverride\" field")
e2e.Logf("Check cd .status.installerImage")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, installerImageForOverride, ok, 2*DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.status.installerImage}"}).check(oc)
e2e.Logf("Check Installer commitID in provision pod log matches commitID from overrided Installer image")
commitID, err := getCommitID(oc, "\" installer \"", clusterVersion)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(commitID).NotTo(o.BeEmpty())
e2e.Logf("Installer commitID is %s", commitID)
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "", nok, DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.status.provisionRef.name}"}).check(oc)
provisionName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.status.provisionRef.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
newCheck("expect", "logs", asAdmin, withoutNamespace, contain, commitID, ok, DefaultTimeout, []string{"-n", oc.Namespace(), fmt.Sprintf("jobs/%s-provision", provisionName), "-c", "hive"}).check(oc)
})
//author: [email protected]
//default duration is 15m for extended-platform-tests and 35m for jenkins job, need to reset for ClusterPool and ClusterDeployment cases
//example: ./bin/extended-platform-tests run all --dry-run|grep "45279"|./bin/extended-platform-tests run --timeout 15m -f -
g.It("NonHyperShiftHOST-NonPreRelease-Longduration-ConnectedOnly-Author:lwan-Medium-45279-Test Metric for ClusterClaim[Serial]", func() {
// Expose Hive metrics, and neutralize the effect after finishing the test case
needRecover, prevConfig := false, ""
defer recoverClusterMonitoring(oc, &needRecover, &prevConfig)
exposeMetrics(oc, testDataDir, &needRecover, &prevConfig)
testCaseID := "45279"
poolName := "pool-" + testCaseID
imageSetName := poolName + "-imageset"
imageSetTemp := filepath.Join(testDataDir, "clusterimageset.yaml")
imageSet := clusterImageSet{
name: imageSetName,
releaseImage: testOCPImage,
template: imageSetTemp,
}
exutil.By("Create ClusterImageSet...")
defer cleanupObjects(oc, objectTableRef{"ClusterImageSet", "", imageSetName})
imageSet.create(oc)
exutil.By("Check if ClusterImageSet was created successfully")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, imageSetName, ok, DefaultTimeout, []string{"ClusterImageSet"}).check(oc)
oc.SetupProject()
//secrets can be accessed by pod in the same namespace, so copy pull-secret and gcp-credentials to target namespace for the pool
exutil.By("Copy GCP platform credentials...")
createGCPCreds(oc, oc.Namespace())
exutil.By("Copy pull-secret...")
createPullSecret(oc, oc.Namespace())
exutil.By("Create ClusterPool...")
poolTemp := filepath.Join(testDataDir, "clusterpool-gcp.yaml")
pool := gcpClusterPool{
name: poolName,
namespace: oc.Namespace(),
fake: "true",
baseDomain: GCPBaseDomain,
imageSetRef: imageSetName,
platformType: "gcp",
credRef: GCPCreds,
region: GCPRegion,
pullSecretRef: PullSecret,
size: 2,
maxSize: 2,
runningCount: 2,
maxConcurrent: 2,
hibernateAfter: "360m",
template: poolTemp,
}
defer cleanupObjects(oc, objectTableRef{"ClusterPool", oc.Namespace(), poolName})
pool.create(oc)
exutil.By("Check if GCP ClusterPool created successfully and become ready")
//runningCount is 2 so pool status should be standby: 0, ready: 2
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "2", ok, DefaultTimeout, []string{"ClusterPool", poolName, "-n", oc.Namespace(), "-o=jsonpath={.status.ready}"}).check(oc)
exutil.By("Check if CD is Running")
cdListStr := getCDlistfromPool(oc, poolName)
var cdArray []string
cdArray = strings.Split(strings.TrimSpace(cdListStr), "\n")
for i := range cdArray {
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "Running", ok, DefaultTimeout, []string{"ClusterDeployment", cdArray[i], "-n", cdArray[i]}).check(oc)
}
exutil.By("Create ClusterClaim...")
claimTemp := filepath.Join(testDataDir, "clusterclaim.yaml")
claimName1 := poolName + "-claim-1"
claim1 := clusterClaim{
name: claimName1,
namespace: oc.Namespace(),
clusterPoolName: poolName,
template: claimTemp,
}
defer cleanupObjects(oc, objectTableRef{"ClusterClaim", oc.Namespace(), claimName1})
claim1.create(oc)
e2e.Logf("Check if ClusterClaim %s created successfully", claimName1)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, claimName1, ok, DefaultTimeout, []string{"ClusterClaim", "-n", oc.Namespace(), "-o=jsonpath={.items[*].metadata.name}"}).check(oc)
exutil.By("Check Metrics for ClusterClaim...")
token, err := exutil.GetSAToken(oc)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(token).NotTo(o.BeEmpty())
query1 := "hive_clusterclaim_assignment_delay_seconds_sum"
query2 := "hive_clusterclaim_assignment_delay_seconds_count"
query3 := "hive_clusterclaim_assignment_delay_seconds_bucket"
query := []string{query1, query2, query3}
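// _sum/_count/_bucket are the standard series of a Prometheus histogram. The _count series is expected
// to track how many claims have been assigned from the pool: 1 after the first claim, 2 after the second.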
exutil.By("Check hive metrics for clusterclaim exist")
checkMetricExist(oc, ok, token, thanosQuerierURL, query)
e2e.Logf("Check metric %s Value is 1", query2)
checkResourcesMetricValue(oc, poolName, oc.Namespace(), "1", token, thanosQuerierURL, query2)
exutil.By("Create another ClusterClaim...")
claimName2 := poolName + "-claim-2"
claim2 := clusterClaim{
name: claimName2,
namespace: oc.Namespace(),
clusterPoolName: poolName,
template: claimTemp,
}
defer cleanupObjects(oc, objectTableRef{"ClusterClaim", oc.Namespace(), claimName2})
claim2.create(oc)
e2e.Logf("Check if ClusterClaim %s created successfully", claimName2)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, claimName2, ok, DefaultTimeout, []string{"ClusterClaim", "-n", oc.Namespace(), "-o=jsonpath={.items[*].metadata.name}"}).check(oc)
e2e.Logf("Check metric %s Value change to 2", query2)
checkResourcesMetricValue(oc, poolName, oc.Namespace(), "2", token, thanosQuerierURL, query2)
})
//author: [email protected]
//example: ./bin/extended-platform-tests run all --dry-run|grep "54463"|./bin/extended-platform-tests run --timeout 35m -f -
g.It("NonHyperShiftHOST-NonPreRelease-Longduration-ConnectedOnly-Author:mihuang-Medium-54463-Add cluster install success/fail metrics[Serial]", func() {
// Expose Hive metrics, and neutralize the effect after finishing the test case
needRecover, prevConfig := false, ""
defer recoverClusterMonitoring(oc, &needRecover, &prevConfig)
exposeMetrics(oc, testDataDir, &needRecover, &prevConfig)
testCaseID := "54463"
cdName := "cluster-" + testCaseID + "-" + getRandomString()[:ClusterSuffixLen]
imageSetName := cdName + "-imageset"
imageSetTemp := filepath.Join(testDataDir, "clusterimageset.yaml")
imageSet := clusterImageSet{
name: imageSetName,
releaseImage: testOCPImage,
template: imageSetTemp,
}
exutil.By("Create ClusterImageSet...")
defer cleanupObjects(oc, objectTableRef{"ClusterImageSet", "", imageSetName})
imageSet.create(oc)
oc.SetupProject()
//secrets can be accessed by pod in the same namespace, so copy pull-secret and gcp-credentials to target namespace for the clusterdeployment
exutil.By("Don't copy GCP platform credentials make install fail...")
//createGCPCreds(oc, oc.Namespace())
exutil.By("Copy pull-secret...")
createPullSecret(oc, oc.Namespace())
exutil.By("Create GCP Install-Config Secret...")
installConfigTemp := filepath.Join(testDataDir, "gcp-install-config.yaml")
installConfigSecretName := cdName + "-install-config"
projectID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure/cluster", "-o=jsonpath={.status.platformStatus.gcp.projectID}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(projectID).NotTo(o.BeEmpty())
installConfigSecret := gcpInstallConfig{
name1: installConfigSecretName,
namespace: oc.Namespace(),
baseDomain: GCPBaseDomain,
name2: cdName,
region: GCPRegion,
projectid: projectID,
template: installConfigTemp,
}
defer cleanupObjects(oc, objectTableRef{"secret", oc.Namespace(), installConfigSecretName})
installConfigSecret.create(oc)
exutil.By("Get SA token to check Metrics...")
token, err := exutil.GetSAToken(oc)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(token).NotTo(o.BeEmpty())
var installAttemptsLimit = []int{3, 1}
for i := 0; i < len(installAttemptsLimit); i++ {
func() {
if installAttemptsLimit[i] == 3 {
exutil.By("Config GCP ClusterDeployment with installAttemptsLimit=3 and make install fail..")
} else {
exutil.By("Config GCP ClusterDeployment with installAttemptsLimit=1 and make install success..")
exutil.By("Copy GCP platform credentials make install success...")
createGCPCreds(oc, oc.Namespace())
}
cluster := gcpClusterDeployment{
fake: "true",
name: cdName,
namespace: oc.Namespace(),
baseDomain: GCPBaseDomain,
clusterName: cdName,
platformType: "gcp",
credRef: GCPCreds,
region: GCPRegion,
imageSetRef: cdName + "-imageset",
installConfigSecret: cdName + "-install-config",
pullSecretRef: PullSecret,
installAttemptsLimit: installAttemptsLimit[i],
template: filepath.Join(testDataDir, "clusterdeployment-gcp.yaml"),
}
defer cleanupObjects(oc, objectTableRef{"ClusterDeployment", oc.Namespace(), cdName})
cluster.create(oc)
if installAttemptsLimit[i] == 3 {
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "InstallAttemptsLimitReached", ok, 5*DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.status.conditions[?(@.type==\"ProvisionStopped\")].reason}"}).check(oc)
o.Expect(checkResourceNumber(oc, cdName, []string{"pods", "-A"})).To(o.Equal(3))
queryFailSum := "hive_cluster_deployment_install_failure_total_sum"
queryFailCount := "hive_cluster_deployment_install_failure_total_count"
queryFailBucket := "hive_cluster_deployment_install_failure_total_bucket"
queryFail := []string{queryFailSum, queryFailCount, queryFailBucket}
exutil.By("Check hive metrics for cd install fail")
checkMetricExist(oc, ok, token, thanosQuerierURL, queryFail)
e2e.Logf("Check metric %s with install_attempt = 2", queryFailCount)
checkResourcesMetricValue(oc, GCPRegion, HiveNamespace, "2", token, thanosQuerierURL, queryFailCount)
e2e.Logf("delete cd and create a success case")
} else {
exutil.By("Check GCP ClusterDeployment installed flag is true")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "true", ok, ClusterInstallTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.spec.installed}"}).check(oc)
querySuccSum := "hive_cluster_deployment_install_success_total_sum"
querySuccCount := "hive_cluster_deployment_install_success_total_count"
querySuccBucket := "hive_cluster_deployment_install_success_total_bucket"
querySuccess := []string{querySuccSum, querySuccCount, querySuccBucket}
exutil.By("Check hive metrics for cd installed successfully")
checkMetricExist(oc, ok, token, thanosQuerierURL, querySuccess)
e2e.Logf("Check metric %s with with install_attempt = 0", querySuccCount)
checkResourcesMetricValue(oc, GCPRegion, HiveNamespace, "0", token, thanosQuerierURL, querySuccCount)
}
}()
}
})
// Timeout: 60min
g.It("NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:jshu-High-68294-GCP Shared VPC support for MachinePool[Serial]", func() {
testCaseID := "68294"
cdName := "cluster-" + testCaseID + "-" + getRandomString()[:ClusterSuffixLen]
//oc.SetupProject()
exutil.By("Config GCP Install-Config Secret...")
projectID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure/cluster", "-o=jsonpath={.status.platformStatus.gcp.projectID}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(projectID).NotTo(o.BeEmpty())
installConfigSecret := gcpInstallConfig{
name1: cdName + "-install-config",
namespace: oc.Namespace(),
baseDomain: GCPBaseDomain,
name2: cdName,
region: GCPRegion,
projectid: projectID,
computeSubnet: "installer-shared-vpc-subnet-2",
controlPlaneSubnet: "installer-shared-vpc-subnet-1",
network: "installer-shared-vpc",
networkProjectId: "openshift-qe-shared-vpc",
template: filepath.Join(testDataDir, "gcp-install-config-sharedvpc.yaml"),
}
exutil.By("Config GCP ClusterDeployment...")
cluster := gcpClusterDeployment{
fake: "false",
name: cdName,
namespace: oc.Namespace(),
baseDomain: GCPBaseDomain,
clusterName: cdName,
platformType: "gcp",
credRef: GCPCreds,
region: GCPRegion,
imageSetRef: cdName + "-imageset",
installConfigSecret: cdName + "-install-config",
pullSecretRef: PullSecret,
installAttemptsLimit: 3,
template: filepath.Join(testDataDir, "clusterdeployment-gcp.yaml"),
}
defer cleanCD(oc, cluster.name+"-imageset", oc.Namespace(), installConfigSecret.name1, cluster.name)
createCD(testDataDir, testOCPImage, oc, oc.Namespace(), installConfigSecret, cluster)
exutil.By("Create the infra MachinePool with the shared vpc...")
inframachinepoolGCPTemp := filepath.Join(testDataDir, "machinepool-infra-gcp-sharedvpc.yaml")
inframp := machinepool{
namespace: oc.Namespace(),
clusterName: cdName,
template: inframachinepoolGCPTemp,
}
defer cleanupObjects(oc,
objectTableRef{"MachinePool", oc.Namespace(), cdName + "-infra"},
)
inframp.create(oc)
exutil.By("Check GCP ClusterDeployment installed flag is true")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "true", ok, ClusterInstallTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.spec.installed}"}).check(oc)
exutil.By("Check the infra MachinePool .status.replicas = 1")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "1", ok, DefaultTimeout, []string{"MachinePool", cdName + "-infra", "-n", oc.Namespace(), "-o=jsonpath={.status.replicas}"}).check(oc)
})
})
| package hive | ||||
test case | openshift/openshift-tests-private | 5705f1fb-7bf7-482d-8644-bb0c769d0fdb | NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:fxie-Critical-68240-Enable UEFISecureBoot for day 2 VMs on GCP [Serial] | ['"context"', '"fmt"', '"path/filepath"', '"time"', '"cloud.google.com/go/compute/apiv1/computepb"', '"google.golang.org/api/iterator"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_gcp.go | g.It("NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:fxie-Critical-68240-Enable UEFISecureBoot for day 2 VMs on GCP [Serial]", func() {
var (
testCaseID = "68240"
cdName = "cd-" + testCaseID + "-" + getRandomString()[:ClusterSuffixLen]
cdTemplate = filepath.Join(testDataDir, "clusterdeployment-gcp.yaml")
icName = cdName + "-install-config"
icTemplate = filepath.Join(testDataDir, "gcp-install-config.yaml")
imageSetName = cdName + "-imageset"
mpTemplate = filepath.Join(testDataDir, "machinepool-infra-gcp.yaml")
)
var (
// Count the number of VMs in a project, after filtering with the passed-in filter
countVMs = func(client *compute.InstancesClient, projectID, filter string) (vmCount int) {
instancesIterator := client.AggregatedList(context.Background(), &computepb.AggregatedListInstancesRequest{
Filter: &filter,
Project: projectID,
})
for {
resp, err := instancesIterator.Next()
if err == iterator.Done {
break
}
o.Expect(err).NotTo(o.HaveOccurred())
vmCount += len(resp.Value.Instances)
}
e2e.Logf("Found VM count = %v", vmCount)
return vmCount
}
)
exutil.By("Getting project ID from the Hive cd")
projectID, err := exutil.GetGcpProjectID(oc)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(projectID).NotTo(o.BeEmpty())
e2e.Logf("Found project ID = %v", projectID)
exutil.By("Creating a spoke cluster with shielded VM enabled")
installConfigSecret := gcpInstallConfig{
name1: icName,
namespace: oc.Namespace(),
baseDomain: basedomain,
name2: cdName,
region: region,
projectid: projectID,
template: icTemplate,
secureBoot: "Enabled",
}
cd := gcpClusterDeployment{
fake: "false",
name: cdName,
namespace: oc.Namespace(),
baseDomain: basedomain,
clusterName: cdName,
platformType: "gcp",
credRef: GCPCreds,
region: region,
imageSetRef: imageSetName,
installConfigSecret: icName,
pullSecretRef: PullSecret,
installAttemptsLimit: 1,
template: cdTemplate,
}
defer cleanCD(oc, imageSetName, oc.Namespace(), icName, cdName)
createCD(testDataDir, testOCPImage, oc, oc.Namespace(), installConfigSecret, cd)
exutil.By("Waiting for the CD to be installed")
newCheck("expect", "get", asAdmin, requireNS, compare, "true", ok,
ClusterInstallTimeout, []string{"ClusterDeployment", cdName, "-o=jsonpath={.spec.installed}"}).check(oc)
// The Google cloud SDK must be able to locate Application Default Credentials (ADC).
// To this end, we should point the GOOGLE_APPLICATION_CREDENTIALS environment
// variable to a Google cloud credential file.
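// For example (illustrative only), the credential file path could be exported before running the suite:
//   export GOOGLE_APPLICATION_CREDENTIALS=/path/to/gcp-service-account.json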
instancesClient, err := compute.NewInstancesRESTClient(context.Background())
o.Expect(err).NotTo(o.HaveOccurred())
filter := fmt.Sprintf("(name=%s*) AND (shieldedInstanceConfig.enableSecureBoot = true)", cdName)
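// A default install-config is assumed to create 3 control-plane and 3 worker VMs, so 6 instances with
// secure boot enabled should match the filter.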
o.Expect(countVMs(instancesClient, projectID, filter)).To(o.Equal(6))
exutil.By("Create an infra MachinePool with secureboot enabled")
inframp := machinepool{
namespace: oc.Namespace(),
clusterName: cdName,
template: mpTemplate,
gcpSecureBoot: "Enabled",
}
// The inframp will be deprovisioned along with the CD, so no need to defer a deletion here.
inframp.create(oc)
exutil.By("Make sure all infraVMs have secureboot enabled")
infraId := getInfraIDFromCDName(oc, cdName)
filterInfra := fmt.Sprintf("(name=%s*) AND (shieldedInstanceConfig.enableSecureBoot = true)", infraId+"-infra")
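// The machinepool-infra-gcp.yaml template is assumed to request a single replica, so exactly one
// secure-boot-enabled VM with the "<infraID>-infra" prefix should eventually exist.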
o.Eventually(func() bool {
return countVMs(instancesClient, projectID, filterInfra) == 1
}).WithTimeout(15 * time.Minute).WithPolling(30 * time.Second).Should(o.BeTrue())
}) | |||||
test case | openshift/openshift-tests-private | 0d1cf2e2-8476-4b39-9654-582b4783bcce | NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:lwan-High-41777-High-28636-Hive API support for GCP[Serial] | ['"os"', '"path/filepath"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_gcp.go | g.It("NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:lwan-High-41777-High-28636-Hive API support for GCP[Serial]", func() {
testCaseID := "41777"
cdName := "cluster-" + testCaseID + "-" + getRandomString()[:ClusterSuffixLen]
oc.SetupProject()
exutil.By("Config GCP Install-Config Secret...")
projectID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure/cluster", "-o=jsonpath={.status.platformStatus.gcp.projectID}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(projectID).NotTo(o.BeEmpty())
installConfigSecret := gcpInstallConfig{
name1: cdName + "-install-config",
namespace: oc.Namespace(),
baseDomain: GCPBaseDomain,
name2: cdName,
region: GCPRegion,
projectid: projectID,
template: filepath.Join(testDataDir, "gcp-install-config.yaml"),
}
exutil.By("Config GCP ClusterDeployment...")
cluster := gcpClusterDeployment{
fake: "false",
name: cdName,
namespace: oc.Namespace(),
baseDomain: GCPBaseDomain,
clusterName: cdName,
platformType: "gcp",
credRef: GCPCreds,
region: GCPRegion,
imageSetRef: cdName + "-imageset",
installConfigSecret: cdName + "-install-config",
pullSecretRef: PullSecret,
installAttemptsLimit: 3,
template: filepath.Join(testDataDir, "clusterdeployment-gcp.yaml"),
}
defer cleanCD(oc, cluster.name+"-imageset", oc.Namespace(), installConfigSecret.name1, cluster.name)
createCD(testDataDir, testOCPImage, oc, oc.Namespace(), installConfigSecret, cluster)
exutil.By("Create worker and infra MachinePool ...")
workermachinepoolGCPTemp := filepath.Join(testDataDir, "machinepool-worker-gcp.yaml")
inframachinepoolGCPTemp := filepath.Join(testDataDir, "machinepool-infra-gcp.yaml")
workermp := machinepool{
namespace: oc.Namespace(),
clusterName: cdName,
template: workermachinepoolGCPTemp,
}
inframp := machinepool{
namespace: oc.Namespace(),
clusterName: cdName,
template: inframachinepoolGCPTemp,
}
defer cleanupObjects(oc,
objectTableRef{"MachinePool", oc.Namespace(), cdName + "-worker"},
objectTableRef{"MachinePool", oc.Namespace(), cdName + "-infra"},
)
workermp.create(oc)
inframp.create(oc)
exutil.By("Check GCP ClusterDeployment installed flag is true")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "true", ok, ClusterInstallTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.spec.installed}"}).check(oc)
exutil.By("OCP-28636: Hive supports remote Machine Set Management for GCP")
tmpDir := "/tmp/" + cdName + "-" + getRandomString()
err = os.MkdirAll(tmpDir, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
defer os.RemoveAll(tmpDir)
getClusterKubeconfig(oc, cdName, oc.Namespace(), tmpDir)
kubeconfig := tmpDir + "/kubeconfig"
e2e.Logf("Check worker machinepool .status.replicas = 3")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "3", ok, DefaultTimeout, []string{"MachinePool", cdName + "-worker", "-n", oc.Namespace(), "-o=jsonpath={.status.replicas}"}).check(oc)
e2e.Logf("Check infra machinepool .status.replicas = 1 ")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "1", ok, DefaultTimeout, []string{"MachinePool", cdName + "-infra", "-n", oc.Namespace(), "-o=jsonpath={.status.replicas}"}).check(oc)
machinesetsname := getResource(oc, asAdmin, withoutNamespace, "MachinePool", cdName+"-infra", "-n", oc.Namespace(), "-o=jsonpath={.status.machineSets[?(@.replicas==1)].name}")
o.Expect(machinesetsname).NotTo(o.BeEmpty())
e2e.Logf("Remote cluster machineset list: %s", machinesetsname)
e2e.Logf("Check machineset %s created on remote cluster", machinesetsname)
newCheck("expect", "get", asAdmin, withoutNamespace, compare, machinesetsname, ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "MachineSet", "-n", "openshift-machine-api", "-l", "hive.openshift.io/machine-pool=infra", "-o=jsonpath={.items[?(@.spec.replicas==1)].metadata.name}"}).check(oc)
e2e.Logf("Check only 1 machineset up")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "1", ok, 5*DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "MachineSet", "-n", "openshift-machine-api", "-l", "hive.openshift.io/machine-pool=infra", "-o=jsonpath={.items[?(@.spec.replicas==1)].status.availableReplicas}"}).check(oc)
e2e.Logf("Check only one machines in Running status")
// Can't filter by infra label because of bug https://issues.redhat.com/browse/HIVE-1922
//newCheck("expect", "get", asAdmin, withoutNamespace, compare, "Running", ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "Machine", "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machine-role=infra", "-o=jsonpath={.items[*].status.phase}"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "Running", ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "Machine", "-n", "openshift-machine-api", "-o=jsonpath={.items[?(@.spec.metadata.labels.node-role\\.kubernetes\\.io==\"infra\")].status.phase}"}).check(oc)
e2e.Logf("Patch infra machinepool .spec.replicas to 3")
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"MachinePool", cdName + "-infra", "-n", oc.Namespace(), "--type", "merge", "-p", `{"spec":{"replicas": 3}}`}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "3", ok, 5*DefaultTimeout, []string{"MachinePool", cdName + "-infra", "-n", oc.Namespace(), "-o=jsonpath={.status.replicas}"}).check(oc)
machinesetsname = getResource(oc, asAdmin, withoutNamespace, "MachinePool", cdName+"-infra", "-n", oc.Namespace(), "-o=jsonpath={.status.machineSets[?(@.replicas==1)].name}")
o.Expect(machinesetsname).NotTo(o.BeEmpty())
e2e.Logf("Remote cluster machineset list: %s", machinesetsname)
e2e.Logf("Check machineset %s created on remote cluster", machinesetsname)
machinesetsArray := strings.Fields(machinesetsname)
o.Expect(len(machinesetsArray) == 3).Should(o.BeTrue())
for _, machinesetName := range machinesetsArray {
newCheck("expect", "get", asAdmin, withoutNamespace, contain, machinesetName, ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "MachineSet", "-n", "openshift-machine-api", "-l", "hive.openshift.io/machine-pool=infra", "-o=jsonpath={.items[?(@.spec.replicas==1)].metadata.name}"}).check(oc)
}
e2e.Logf("Check machinesets scale up to 3")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "1 1 1", ok, 5*DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "MachineSet", "-n", "openshift-machine-api", "-l", "hive.openshift.io/machine-pool=infra", "-o=jsonpath={.items[?(@.spec.replicas==1)].status.availableReplicas}"}).check(oc)
e2e.Logf("Check 3 machines in Running status")
// Can't filter by infra label because of bug https://issues.redhat.com/browse/HIVE-1922
//newCheck("expect", "get", asAdmin, withoutNamespace, compare, "Running Running Running", ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "Machine", "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machine-role=infra", "-o=jsonpath={.items[*].status.phase}"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "Running Running Running", ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "Machine", "-n", "openshift-machine-api", "-o=jsonpath={.items[?(@.spec.metadata.labels.node-role\\.kubernetes\\.io==\"infra\")].status.phase}"}).check(oc)
e2e.Logf("Patch infra machinepool .spec.replicas to 2")
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"MachinePool", cdName + "-infra", "-n", oc.Namespace(), "--type", "merge", "-p", `{"spec":{"replicas": 2}}`}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "2", ok, 5*DefaultTimeout, []string{"MachinePool", cdName + "-infra", "-n", oc.Namespace(), "-o=jsonpath={.status.replicas}"}).check(oc)
machinesetsname = getResource(oc, asAdmin, withoutNamespace, "MachinePool", cdName+"-infra", "-n", oc.Namespace(), "-o=jsonpath={.status.machineSets[?(@.replicas==1)].name}")
o.Expect(machinesetsname).NotTo(o.BeEmpty())
e2e.Logf("Remote cluster machineset list: %s", machinesetsname)
e2e.Logf("Check machineset %s created on remote cluster", machinesetsname)
machinesetsArray = strings.Fields(machinesetsname)
o.Expect(len(machinesetsArray) == 2).Should(o.BeTrue())
for _, machinesetName := range machinesetsArray {
newCheck("expect", "get", asAdmin, withoutNamespace, contain, machinesetName, ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "MachineSet", "-n", "openshift-machine-api", "-l", "hive.openshift.io/machine-pool=infra", "-o=jsonpath={.items[?(@.spec.replicas==1)].metadata.name}"}).check(oc)
}
e2e.Logf("Check machinesets scale down to 2")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "1 1", ok, 5*DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "MachineSet", "-n", "openshift-machine-api", "-l", "hive.openshift.io/machine-pool=infra", "-o=jsonpath={.items[?(@.spec.replicas==1)].status.availableReplicas}"}).check(oc)
e2e.Logf("Check 2 machines in Running status")
// Can't filter by infra label because of bug https://issues.redhat.com/browse/HIVE-1922
//newCheck("expect", "get", asAdmin, withoutNamespace, compare, "Running Running", ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "Machine", "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machine-role=infra", "-o=jsonpath={.items[*].status.phase}"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "Running Running", ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "Machine", "-n", "openshift-machine-api", "-o=jsonpath={.items[?(@.spec.metadata.labels.node-role\\.kubernetes\\.io==\"infra\")].status.phase}"}).check(oc)
}) | |||||
test case | openshift/openshift-tests-private | 0719d980-8cc6-42ba-aa65-66b72b78859c | NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:lwan-Medium-33872-[gcp]Hive supports ClusterPool [Serial] | ['"path/filepath"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_gcp.go | g.It("NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:lwan-Medium-33872-[gcp]Hive supports ClusterPool [Serial]", func() {
testCaseID := "33872"
poolName := "pool-" + testCaseID
imageSetName := poolName + "-imageset"
imageSetTemp := filepath.Join(testDataDir, "clusterimageset.yaml")
imageSet := clusterImageSet{
name: imageSetName,
releaseImage: testOCPImage,
template: imageSetTemp,
}
exutil.By("Create ClusterImageSet...")
defer cleanupObjects(oc, objectTableRef{"ClusterImageSet", "", imageSetName})
imageSet.create(oc)
exutil.By("Check if ClusterImageSet was created successfully")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, imageSetName, ok, DefaultTimeout, []string{"ClusterImageSet"}).check(oc)
oc.SetupProject()
//secrets can be accessed by pod in the same namespace, so copy pull-secret and gcp-credentials to target namespace for the pool
exutil.By("Copy GCP platform credentials...")
createGCPCreds(oc, oc.Namespace())
exutil.By("Copy pull-secret...")
createPullSecret(oc, oc.Namespace())
exutil.By("Create ClusterPool...")
poolTemp := filepath.Join(testDataDir, "clusterpool-gcp.yaml")
pool := gcpClusterPool{
name: poolName,
namespace: oc.Namespace(),
fake: "false",
baseDomain: GCPBaseDomain,
imageSetRef: imageSetName,
platformType: "gcp",
credRef: GCPCreds,
region: GCPRegion,
pullSecretRef: PullSecret,
size: 1,
maxSize: 1,
runningCount: 0,
maxConcurrent: 1,
hibernateAfter: "360m",
template: poolTemp,
}
defer cleanupObjects(oc, objectTableRef{"ClusterPool", oc.Namespace(), poolName})
pool.create(oc)
exutil.By("Check if GCP ClusterPool created successfully and become ready")
//runningCount is 0 so pool status should be standby: 1, ready: 0
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "1", ok, ClusterInstallTimeout, []string{"ClusterPool", poolName, "-n", oc.Namespace(), "-o=jsonpath={.status.standby}"}).check(oc)
exutil.By("Check if CD is Hibernating")
cdListStr := getCDlistfromPool(oc, poolName)
var cdArray []string
cdArray = strings.Split(strings.TrimSpace(cdListStr), "\n")
for i := range cdArray {
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "Hibernating", ok, ClusterResumeTimeout, []string{"ClusterDeployment", cdArray[i], "-n", cdArray[i]}).check(oc)
}
exutil.By("Patch pool.spec.lables.test=test...")
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"ClusterPool", poolName, "-n", oc.Namespace(), "--type", "merge", "-p", `{"spec":{"labels":{"test":"test"}}}`}).check(oc)
exutil.By("The existing CD in the pool has no test label")
for i := range cdArray {
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "test", nok, DefaultTimeout, []string{"ClusterDeployment", cdArray[i], "-n", cdArray[i], "-o=jsonpath={.metadata.labels}"}).check(oc)
}
exutil.By("The new CD in the pool should have the test label")
e2e.Logf("Delete the old CD in the pool")
newCheck("expect", "delete", asAdmin, withoutNamespace, contain, "delete", ok, ClusterUninstallTimeout, []string{"ClusterDeployment", cdArray[0], "-n", cdArray[0]}).check(oc)
e2e.Logf("Get the CD list from the pool again.")
cdListStr = getCDlistfromPool(oc, poolName)
cdArray = strings.Split(strings.TrimSpace(cdListStr), "\n")
for i := range cdArray {
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "test", ok, DefaultTimeout, []string{"ClusterDeployment", cdArray[i], "-n", cdArray[i], "-o=jsonpath={.metadata.labels}"}).check(oc)
}
}) | |||||
test case | openshift/openshift-tests-private | e7e8a724-2af4-4a48-9355-0cfa707a3ff2 | NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:liangli-Medium-44475-Medium-45158-[gcp]Hive Change BaseDomain field right after creating pool and all clusters finish install firstly then recreated [Serial] | ['"path/filepath"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_gcp.go | g.It("NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:liangli-Medium-44475-Medium-45158-[gcp]Hive Change BaseDomain field right after creating pool and all clusters finish install firstly then recreated [Serial]", func() {
testCaseID := "44475"
poolName := "pool-" + testCaseID
imageSetName := poolName + "-imageset"
imageSetTemp := filepath.Join(testDataDir, "clusterimageset.yaml")
imageSet := clusterImageSet{
name: imageSetName,
releaseImage: testOCPImage,
template: imageSetTemp,
}
exutil.By("Create ClusterImageSet...")
defer cleanupObjects(oc, objectTableRef{"ClusterImageSet", "", imageSetName})
imageSet.create(oc)
exutil.By("Check if ClusterImageSet was created successfully")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, imageSetName, ok, DefaultTimeout, []string{"ClusterImageSet"}).check(oc)
oc.SetupProject()
//secrets can be accessed by pod in the same namespace, so copy pull-secret and gcp-credentials to target namespace for the clusterdeployment
exutil.By("Copy GCP platform credentials...")
createGCPCreds(oc, oc.Namespace())
exutil.By("Copy pull-secret...")
createPullSecret(oc, oc.Namespace())
exutil.By("Create ClusterPool...")
poolTemp := filepath.Join(testDataDir, "clusterpool-gcp.yaml")
pool := gcpClusterPool{
name: poolName,
namespace: oc.Namespace(),
fake: "false",
baseDomain: GCPBaseDomain,
imageSetRef: imageSetName,
platformType: "gcp",
credRef: GCPCreds,
region: GCPRegion,
pullSecretRef: PullSecret,
size: 1,
maxSize: 1,
runningCount: 0,
maxConcurrent: 1,
hibernateAfter: "360m",
template: poolTemp,
}
defer cleanupObjects(oc, objectTableRef{"ClusterPool", oc.Namespace(), poolName})
pool.create(oc)
e2e.Logf("Check ClusterDeployment in pool created")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, poolName, ok, DefaultTimeout, []string{"ClusterDeployment", "-A", "-o=jsonpath={.items[*].metadata.name}"}).check(oc)
e2e.Logf("get old ClusterDeployment Name")
cdListStr := getCDlistfromPool(oc, poolName)
oldClusterDeploymentName := strings.Split(strings.TrimSpace(cdListStr), "\n")
o.Expect(len(oldClusterDeploymentName) > 0).Should(o.BeTrue())
e2e.Logf("old cd name:" + oldClusterDeploymentName[0])
exutil.By("OCP-45158: Check Provisioned condition")
e2e.Logf("Check ClusterDeployment is provisioning")
expectedResult := "message:Cluster is provisioning,reason:Provisioning,status:False"
jsonPath := "-o=jsonpath={\"message:\"}{.status.conditions[?(@.type==\"Provisioned\")].message}{\",reason:\"}{.status.conditions[?(@.type==\"Provisioned\")].reason}{\",status:\"}{.status.conditions[?(@.type==\"Provisioned\")].status}"
newCheck("expect", "get", asAdmin, withoutNamespace, contain, expectedResult, ok, DefaultTimeout, []string{"ClusterDeployment", oldClusterDeploymentName[0], "-n", oldClusterDeploymentName[0], jsonPath}).check(oc)
e2e.Logf("Check ClusterDeployment Provisioned finish")
expectedResult = "message:Cluster is provisioned,reason:Provisioned,status:True"
newCheck("expect", "get", asAdmin, withoutNamespace, contain, expectedResult, ok, ClusterInstallTimeout, []string{"ClusterDeployment", oldClusterDeploymentName[0], "-n", oldClusterDeploymentName[0], jsonPath}).check(oc)
exutil.By("Check if GCP ClusterPool created successfully and become ready")
//runningCount is 0 so pool status should be standby: 1, ready: 0
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "1", ok, DefaultTimeout, []string{"ClusterPool", poolName, "-n", oc.Namespace(), "-o=jsonpath={.status.standby}"}).check(oc)
exutil.By("test OCP-44475")
e2e.Logf("oc patch ClusterPool 'spec.baseDomain'")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("ClusterPool", poolName, "-n", oc.Namespace(), "-p", `{"spec":{"baseDomain":"`+GCPBaseDomain2+`"}}`, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Check ClusterDeployment is Deprovisioning")
expectedResult = "message:Cluster is deprovisioning,reason:Deprovisioning,status:False"
newCheck("expect", "get", asAdmin, withoutNamespace, contain, expectedResult, ok, DefaultTimeout, []string{"ClusterDeployment", oldClusterDeploymentName[0], "-n", oldClusterDeploymentName[0], jsonPath}).check(oc)
e2e.Logf("Check ClusterDeployment is Deprovisioned")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, oldClusterDeploymentName[0], nok, ClusterUninstallTimeout, []string{"ClusterDeployment", "-A", "-o=jsonpath={.items[*].metadata.name}"}).check(oc)
e2e.Logf("Check if ClusterPool re-create the CD")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, poolName, ok, DefaultTimeout, []string{"ClusterDeployment", "-A", "-o=jsonpath={.items[*].metadata.name}"}).check(oc)
e2e.Logf("get new ClusterDeployment name")
cdListStr = getCDlistfromPool(oc, poolName)
newClusterDeploymentName := strings.Split(strings.TrimSpace(cdListStr), "\n")
o.Expect(len(newClusterDeploymentName) > 0).Should(o.BeTrue())
e2e.Logf("new cd name:" + newClusterDeploymentName[0])
newCheck("expect", "get", asAdmin, withoutNamespace, contain, GCPBaseDomain2, ok, DefaultTimeout, []string{"ClusterDeployment", newClusterDeploymentName[0], "-n", newClusterDeploymentName[0], "-o=jsonpath={.spec.baseDomain}"}).check(oc)
o.Expect(strings.Compare(oldClusterDeploymentName[0], newClusterDeploymentName[0]) != 0).Should(o.BeTrue())
}) | |||||
test case | openshift/openshift-tests-private | 3bd784ca-c5f8-4190-b21a-22bd40985a2e | NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:lwan-High-41499-High-34404-High-25333-Hive syncset test for paused and multi-modes[Serial] | ['"io/ioutil"', '"os"', '"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_gcp.go | g.It("NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:lwan-High-41499-High-34404-High-25333-Hive syncset test for paused and multi-modes[Serial]", func() {
testCaseID := "41499"
cdName := "cluster-" + testCaseID + "-" + getRandomString()[:ClusterSuffixLen]
oc.SetupProject()
exutil.By("Config GCP Install-Config Secret...")
projectID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure/cluster", "-o=jsonpath={.status.platformStatus.gcp.projectID}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(projectID).NotTo(o.BeEmpty())
installConfigSecret := gcpInstallConfig{
name1: cdName + "-install-config",
namespace: oc.Namespace(),
baseDomain: GCPBaseDomain,
name2: cdName,
region: GCPRegion,
projectid: projectID,
template: filepath.Join(testDataDir, "gcp-install-config.yaml"),
}
exutil.By("Config GCP ClusterDeployment...")
cluster := gcpClusterDeployment{
fake: "false",
name: cdName,
namespace: oc.Namespace(),
baseDomain: GCPBaseDomain,
clusterName: cdName,
platformType: "gcp",
credRef: GCPCreds,
region: GCPRegion,
imageSetRef: cdName + "-imageset",
installConfigSecret: cdName + "-install-config",
pullSecretRef: PullSecret,
installAttemptsLimit: 3,
template: filepath.Join(testDataDir, "clusterdeployment-gcp.yaml"),
}
defer cleanCD(oc, cluster.name+"-imageset", oc.Namespace(), installConfigSecret.name1, cluster.name)
createCD(testDataDir, testOCPImage, oc, oc.Namespace(), installConfigSecret, cluster)
exutil.By("Check GCP ClusterDeployment installed flag is true")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "true", ok, ClusterInstallTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.spec.installed}"}).check(oc)
tmpDir := "/tmp/" + cdName + "-" + getRandomString()
err = os.MkdirAll(tmpDir, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
defer os.RemoveAll(tmpDir)
getClusterKubeconfig(oc, cdName, oc.Namespace(), tmpDir)
kubeconfig := tmpDir + "/kubeconfig"
exutil.By("OCP-41499: Add condition in ClusterDeployment status for paused syncset")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, cdName, ok, DefaultTimeout, []string{"ClusterSync", cdName, "-n", oc.Namespace()}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "False", ok, DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.status.conditions[?(@.type==\"SyncSetFailed\")].status}"}).check(oc)
e2e.Logf("Add \"hive.openshift.io/syncset-pause\" annotation in ClusterDeployment, and delete ClusterSync CR")
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "--type", "merge", "-p", `{"metadata": {"annotations": {"hive.openshift.io/syncset-pause": "true"}}}`}).check(oc)
newCheck("expect", "delete", asAdmin, withoutNamespace, contain, "delete", ok, DefaultTimeout, []string{"ClusterSync", cdName, "-n", oc.Namespace()}).check(oc)
e2e.Logf("Check ClusterDeployment condition=SyncSetFailed")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "True", ok, DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.status.conditions[?(@.type==\"SyncSetFailed\")].status}"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "SyncSetPaused", ok, DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.status.conditions[?(@.type==\"SyncSetFailed\")].reason}"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "SyncSet is paused. ClusterSync will not be created", ok, DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.status.conditions[?(@.type==\"SyncSetFailed\")].message}"}).check(oc)
e2e.Logf("Check ClusterSync won't be created.")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, cdName, nok, DefaultTimeout, []string{"ClusterSync", "-n", oc.Namespace()}).check(oc)
e2e.Logf("Remove annotation, check ClusterSync will be created again.")
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "--type", "merge", "-p", `{"metadata": {"annotations": {"hive.openshift.io/syncset-pause": "false"}}}`}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "False", ok, DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.status.conditions[?(@.type==\"SyncSetFailed\")].status}"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, cdName, ok, DefaultTimeout, []string{"ClusterSync", cdName, "-n", oc.Namespace()}).check(oc)
exutil.By("OCP-34404: Hive adds muti-modes for syncset to handle applying resources too large")
e2e.Logf("Create SyncSet with default applyBehavior.")
syncSetName := testCaseID + "-syncset-1"
configMapName := testCaseID + "-configmap-1"
configMapNamespace := testCaseID + "-" + getRandomString() + "-hive-1"
resourceMode := "Sync"
syncTemp := filepath.Join(testDataDir, "syncset-resource.yaml")
syncResource := syncSetResource{
name: syncSetName,
namespace: oc.Namespace(),
namespace2: configMapNamespace,
cdrefname: cdName,
cmname: configMapName,
cmnamespace: configMapNamespace,
ramode: resourceMode,
template: syncTemp,
}
defer cleanupObjects(oc, objectTableRef{"SyncSet", oc.Namespace(), syncSetName})
syncResource.create(oc)
e2e.Logf("Check ConfigMap is created on target cluster and have a last-applied-config annotation.")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, `{"foo":"bar"}`, ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "ConfigMap", configMapName, "-n", configMapNamespace, "-o=jsonpath={.data}"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "kubectl.kubernetes.io/last-applied-configuration", ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "ConfigMap", configMapName, "-n", configMapNamespace, "-o=jsonpath={.metadata.annotations}"}).check(oc)
e2e.Logf("Patch syncset resource.")
patchYaml := `
spec:
resources:
- apiVersion: v1
kind: Namespace
metadata:
name: ` + configMapNamespace + `
- apiVersion: v1
data:
foo1: bar1
kind: ConfigMap
metadata:
name: ` + configMapName + `
namespace: ` + configMapNamespace
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"SyncSet", syncSetName, "-n", oc.Namespace(), "--type", "merge", "-p", patchYaml}).check(oc)
e2e.Logf("Check data field in ConfigMap on target cluster should update.")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, `{"foo1":"bar1"}`, ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "ConfigMap", configMapName, "-n", configMapNamespace, "-o=jsonpath={.data}"}).check(oc)
e2e.Logf("Create SyncSet with applyBehavior=CreateOnly.")
syncSetName2 := testCaseID + "-syncset-2"
configMapName2 := testCaseID + "-configmap-2"
configMapNamespace2 := testCaseID + "-" + getRandomString() + "-hive-2"
applyBehavior := "CreateOnly"
syncTemp2 := filepath.Join(testDataDir, "syncset-resource.yaml")
syncResource2 := syncSetResource{
name: syncSetName2,
namespace: oc.Namespace(),
namespace2: configMapNamespace2,
cdrefname: cdName,
cmname: configMapName2,
cmnamespace: configMapNamespace2,
ramode: resourceMode,
applybehavior: applyBehavior,
template: syncTemp2,
}
defer cleanupObjects(oc, objectTableRef{"SyncSet", oc.Namespace(), syncSetName2})
syncResource2.create(oc)
e2e.Logf("Check ConfigMap is created on target cluster and should not have the last-applied-config annotation.")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, `{"foo":"bar"}`, ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "ConfigMap", configMapName2, "-n", configMapNamespace2, "-o=jsonpath={.data}"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "kubectl.kubernetes.io/last-applied-configuration", nok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "ConfigMap", configMapName2, "-n", configMapNamespace2, "-o=jsonpath={.metadata.annotations}"}).check(oc)
e2e.Logf("Patch syncset resource.")
patchYaml = `
spec:
resources:
- apiVersion: v1
kind: Namespace
metadata:
name: ` + configMapNamespace2 + `
- apiVersion: v1
data:
foo1: bar1
kind: ConfigMap
metadata:
name: ` + configMapName2 + `
namespace: ` + configMapNamespace2
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"SyncSet", syncSetName2, "-n", oc.Namespace(), "--type", "merge", "-p", patchYaml}).check(oc)
e2e.Logf("Check data field in ConfigMap on target cluster should not update.")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, `{"foo":"bar"}`, ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "ConfigMap", configMapName2, "-n", configMapNamespace2, "-o=jsonpath={.data}"}).check(oc)
e2e.Logf("Create SyncSet with applyBehavior=CreateOrUpdate.")
syncSetName3 := testCaseID + "-syncset-3"
configMapName3 := testCaseID + "-configmap-3"
configMapNamespace3 := testCaseID + "-" + getRandomString() + "-hive-3"
applyBehavior = "CreateOrUpdate"
syncTemp3 := filepath.Join(testDataDir, "syncset-resource.yaml")
syncResource3 := syncSetResource{
name: syncSetName3,
namespace: oc.Namespace(),
namespace2: configMapNamespace3,
cdrefname: cdName,
cmname: configMapName3,
cmnamespace: configMapNamespace3,
ramode: resourceMode,
applybehavior: applyBehavior,
template: syncTemp3,
}
defer cleanupObjects(oc, objectTableRef{"SyncSet", oc.Namespace(), syncSetName3})
syncResource3.create(oc)
e2e.Logf("Check ConfigMap is created on target cluster and should not have the last-applied-config annotation.")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, `{"foo":"bar"}`, ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "ConfigMap", configMapName3, "-n", configMapNamespace3, "-o=jsonpath={.data}"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "kubectl.kubernetes.io/last-applied-configuration", nok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "ConfigMap", configMapName3, "-n", configMapNamespace3, "-o=jsonpath={.metadata.annotations}"}).check(oc)
e2e.Logf("Patch syncset resource.")
patchYaml = `
spec:
resources:
- apiVersion: v1
kind: Namespace
metadata:
name: ` + configMapNamespace3 + `
- apiVersion: v1
data:
foo2: bar2
kind: ConfigMap
metadata:
name: ` + configMapName3 + `
namespace: ` + configMapNamespace3
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"SyncSet", syncSetName3, "-n", oc.Namespace(), "--type", "merge", "-p", patchYaml}).check(oc)
e2e.Logf("Check data field in ConfigMap on target cluster should update and contain both foo and foo2.")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, `{"foo":"bar","foo2":"bar2"}`, ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "ConfigMap", configMapName3, "-n", configMapNamespace3, "-o=jsonpath={.data}"}).check(oc)
e2e.Logf("Patch syncset resource.")
patchYaml = `
spec:
resources:
- apiVersion: v1
kind: Namespace
metadata:
name: ` + configMapNamespace3 + `
- apiVersion: v1
data:
foo: bar-test
foo3: bar3
kind: ConfigMap
metadata:
name: ` + configMapName3 + `
namespace: ` + configMapNamespace3
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"SyncSet", syncSetName3, "-n", oc.Namespace(), "--type", "merge", "-p", patchYaml}).check(oc)
e2e.Logf("Check data field in ConfigMap on target cluster should update, patch foo and add foo3.")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, `{"foo":"bar-test","foo2":"bar2","foo3":"bar3"}`, ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "ConfigMap", configMapName3, "-n", configMapNamespace3, "-o=jsonpath={.data}"}).check(oc)
exutil.By("OCP-25333: Changing apiGroup for ClusterRoleBinding in SyncSet doesn't delete the CRB")
e2e.Logf("Create SyncSet with invalid apiGroup in resource CR.")
syncSetName4 := testCaseID + "-syncset-4"
syncsetYaml := `
apiVersion: hive.openshift.io/v1
kind: SyncSet
metadata:
name: ` + syncSetName4 + `
spec:
clusterDeploymentRefs:
- name: ` + cdName + `
- namespace: ` + oc.Namespace() + `
resourceApplyMode: Sync
resources:
- apiVersion: authorization.openshift.io/v1
kind: ClusterRoleBinding
metadata:
name: dedicated-admins-cluster
subjects:
- kind: Group
name: dedicated-admins
- kind: Group
name: system:serviceaccounts:dedicated-admin
roleRef:
name: dedicated-admins-cluster`
var filename = testCaseID + "-syncset-crb.yaml"
err = ioutil.WriteFile(filename, []byte(syncsetYaml), 0644)
defer os.Remove(filename)
o.Expect(err).NotTo(o.HaveOccurred())
output, err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", filename, "-n", oc.Namespace()).Output()
o.Expect(err).To(o.HaveOccurred())
o.Expect(output).To(o.ContainSubstring(`Invalid value: "authorization.openshift.io/v1": must use kubernetes group for this resource kind`))
e2e.Logf("oc create syncset failed, this is expected.")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, syncSetName4, nok, DefaultTimeout, []string{"SyncSet", "-n", oc.Namespace()}).check(oc)
}) | |||||
test case | openshift/openshift-tests-private | 34b7adb4-2873-41d5-ad27-3a962bbd9a71 | Author:mihuang-NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Medium-35069-High-78499-Hive supports cluster hibernation for gcp[Serial] | ['"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_gcp.go | g.It("Author:mihuang-NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Medium-35069-High-78499-Hive supports cluster hibernation for gcp[Serial]", func() {
testCaseID := "35069"
cdName := "cluster-" + testCaseID + "-" + getRandomString()[:ClusterSuffixLen]
oc.SetupProject()
exutil.By("Config GCP Install-Config Secret...")
projectID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure/cluster", "-o=jsonpath={.status.platformStatus.gcp.projectID}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(projectID).NotTo(o.BeEmpty())
installConfigSecret := gcpInstallConfig{
name1: cdName + "-install-config",
namespace: oc.Namespace(),
baseDomain: GCPBaseDomain,
name2: cdName,
region: GCPRegion,
projectid: projectID,
template: filepath.Join(testDataDir, "gcp-install-config.yaml"),
}
exutil.By("Config GCP ClusterDeployment...")
cluster := gcpClusterDeployment{
fake: "false",
name: cdName,
namespace: oc.Namespace(),
baseDomain: GCPBaseDomain,
clusterName: cdName,
platformType: "gcp",
credRef: GCPCreds,
region: GCPRegion,
imageSetRef: cdName + "-imageset",
installConfigSecret: cdName + "-install-config",
pullSecretRef: PullSecret,
installAttemptsLimit: 3,
template: filepath.Join(testDataDir, "clusterdeployment-gcp.yaml"),
}
defer cleanCD(oc, cluster.name+"-imageset", oc.Namespace(), installConfigSecret.name1, cluster.name)
createCD(testDataDir, testOCPImage, oc, oc.Namespace(), installConfigSecret, cluster)
exutil.By("Check GCP ClusterDeployment installed flag is true")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "true", ok, ClusterInstallTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.spec.installed}"}).check(oc)
exutil.By("OCP-78499: Verify whether the discardLocalSsdOnHibernate field exists")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "false", ok, DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.spec.platform.gcp.discardLocalSsdOnHibernate}"}).check(oc)
exutil.By("Check CD has Hibernating condition")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "False", ok, DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), `-o=jsonpath={.status.conditions[?(@.type=="Hibernating")].status}`}).check(oc)
exutil.By("patch the CD to Hibernating...")
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "--type", "merge", "-p", `{"spec":{"powerState": "Hibernating"}}`}).check(oc)
e2e.Logf("OCP-78499: Wait until the CD successfully reaches the Hibernating state.")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "Hibernating", ok, ClusterResumeTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.spec.powerState}"}).check(oc)
e2e.Logf("Check cd's condition")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "True", ok, ClusterResumeTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), `-o=jsonpath={.status.conditions[?(@.type=="Hibernating")].status}`}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "False", ok, ClusterResumeTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), `-o=jsonpath={.status.conditions[?(@.type=="Ready")].status}`}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "True", ok, DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), `-o=jsonpath={.status.conditions[?(@.type=="Unreachable")].status}`}).check(oc)
exutil.By("patch the CD to Running...")
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "--type", "merge", "-p", `{"spec":{"powerState": "Running"}}`}).check(oc)
e2e.Logf("Wait for CD to be Running")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "Running", ok, ClusterResumeTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.spec.powerState}"}).check(oc)
e2e.Logf("Check cd's condition")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "False", ok, ClusterResumeTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), `-o=jsonpath={.status.conditions[?(@.type=="Hibernating")].status}`}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "True", ok, ClusterResumeTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), `-o=jsonpath={.status.conditions[?(@.type=="Ready")].status}`}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "False", ok, DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), `-o=jsonpath={.status.conditions[?(@.type=="Unreachable")].status}`}).check(oc)
}) | |||||
test case | openshift/openshift-tests-private | 5d8cb1e7-154b-4479-93a8-d0c2c7c38b3f | NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:lwan-Medium-52411-[GCP]Hive Machinepool test for autoscale [Serial] | ['"fmt"', '"io/ioutil"', '"os"', '"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_gcp.go | g.It("NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:lwan-Medium-52411-[GCP]Hive Machinepool test for autoscale [Serial]", func() {
testCaseID := "52411"
cdName := "cluster-" + testCaseID + "-" + getRandomString()[:ClusterSuffixLen]
oc.SetupProject()
exutil.By("Config GCP Install-Config Secret...")
projectID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure/cluster", "-o=jsonpath={.status.platformStatus.gcp.projectID}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(projectID).NotTo(o.BeEmpty())
installConfigSecret := gcpInstallConfig{
name1: cdName + "-install-config",
namespace: oc.Namespace(),
baseDomain: GCPBaseDomain,
name2: cdName,
region: GCPRegion,
projectid: projectID,
template: filepath.Join(testDataDir, "gcp-install-config.yaml"),
}
exutil.By("Config GCP ClusterDeployment...")
cluster := gcpClusterDeployment{
fake: "false",
name: cdName,
namespace: oc.Namespace(),
baseDomain: GCPBaseDomain,
clusterName: cdName,
platformType: "gcp",
credRef: GCPCreds,
region: GCPRegion,
imageSetRef: cdName + "-imageset",
installConfigSecret: cdName + "-install-config",
pullSecretRef: PullSecret,
installAttemptsLimit: 3,
template: filepath.Join(testDataDir, "clusterdeployment-gcp.yaml"),
}
defer cleanCD(oc, cluster.name+"-imageset", oc.Namespace(), installConfigSecret.name1, cluster.name)
createCD(testDataDir, testOCPImage, oc, oc.Namespace(), installConfigSecret, cluster)
exutil.By("Create infra MachinePool ...")
inframachinepoolGCPTemp := filepath.Join(testDataDir, "machinepool-infra-gcp.yaml")
inframp := machinepool{
namespace: oc.Namespace(),
clusterName: cdName,
template: inframachinepoolGCPTemp,
}
defer cleanupObjects(oc, objectTableRef{"MachinePool", oc.Namespace(), cdName + "-infra"})
inframp.create(oc)
exutil.By("Check if ClusterDeployment created successfully and become Provisioned")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "true", ok, ClusterInstallTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.spec.installed}"}).check(oc)
tmpDir := "/tmp/" + cdName + "-" + getRandomString()
err = os.MkdirAll(tmpDir, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
defer os.RemoveAll(tmpDir)
getClusterKubeconfig(oc, cdName, oc.Namespace(), tmpDir)
kubeconfig := tmpDir + "/kubeconfig"
e2e.Logf("Patch static replicas to autoscaler")
exutil.By("OCP-52411: [GCP]Allow minReplicas autoscaling of MachinePools to be 0")
e2e.Logf("Check hive allow set minReplicas=0 without zone setting")
autoScalingMax := "4"
autoScalingMin := "0"
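// A MachinePool cannot specify both static replicas and autoscaling, so drop .spec.replicas
// before patching in the autoscaling stanza.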
removeConfig := "[{\"op\": \"remove\", \"path\": \"/spec/replicas\"}]"
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"MachinePool", cdName + "-infra", "-n", oc.Namespace(), "--type", "json", "-p", removeConfig}).check(oc)
autoscalConfig := fmt.Sprintf("{\"spec\": {\"autoscaling\": {\"maxReplicas\": %s, \"minReplicas\": %s}}}", autoScalingMax, autoScalingMin)
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"MachinePool", cdName + "-infra", "-n", oc.Namespace(), "--type", "merge", "-p", autoscalConfig}).check(oc)
e2e.Logf("Check replicas is 0")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "0 0 0 0", ok, 2*DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "MachineSet", "-n", "openshift-machine-api", "-l", "hive.openshift.io/machine-pool=infra", "-o=jsonpath={.items[*].status.replicas}"}).check(oc)
e2e.Logf("Check hive allow set minReplicas=0 within zone setting")
cleanupObjects(oc, objectTableRef{"MachinePool", oc.Namespace(), cdName + "-infra"})
infra2MachinepoolYaml := `
apiVersion: hive.openshift.io/v1
kind: MachinePool
metadata:
name: ` + cdName + `-infra2
namespace: ` + oc.Namespace() + `
spec:
autoscaling:
maxReplicas: 4
minReplicas: 0
clusterDeploymentRef:
name: ` + cdName + `
labels:
node-role.kubernetes.io: infra2
node-role.kubernetes.io/infra2: ""
name: infra2
platform:
gcp:
osDisk: {}
type: n1-standard-4
zones:
- ` + GCPRegion + `-a
- ` + GCPRegion + `-b
- ` + GCPRegion + `-c
- ` + GCPRegion + `-f`
var filename = testCaseID + "-machinepool-infra2.yaml"
err = ioutil.WriteFile(filename, []byte(infra2MachinepoolYaml), 0644)
defer os.Remove(filename)
o.Expect(err).NotTo(o.HaveOccurred())
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f", filename, "--ignore-not-found").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", filename).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Check replicas is 0")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "0 0 0 0", ok, 2*DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "MachineSet", "-n", "openshift-machine-api", "-l", "hive.openshift.io/machine-pool=infra2", "-o=jsonpath={.items[*].status.replicas}"}).check(oc)
exutil.By("Check Hive supports autoscale for GCP")
patchYaml := `
spec:
scaleDown:
enabled: true
delayAfterAdd: 10s
delayAfterDelete: 10s
delayAfterFailure: 10s
unneededTime: 10s`
e2e.Logf("Add busybox in remote cluster and check machines will scale up to maxReplicas")
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "ClusterAutoscaler", "default", "--type", "merge", "-p", patchYaml}).check(oc)
workloadYaml := filepath.Join(testDataDir, "workload.yaml")
defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("--kubeconfig="+kubeconfig, "-f", workloadYaml, "--ignore-not-found").Execute()
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("--kubeconfig="+kubeconfig, "-f", workloadYaml).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "busybox", ok, DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "Deployment", "busybox", "-n", "default"}).check(oc)
e2e.Logf("Check replicas will scale up to maximum value")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "1 1 1 1", ok, 5*DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "MachineSet", "-n", "openshift-machine-api", "-l", "hive.openshift.io/machine-pool=infra2", "-o=jsonpath={.items[*].status.replicas}"}).check(oc)
e2e.Logf("Delete busybox in remote cluster and check machines will scale down to minReplicas %s", autoScalingMin)
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("--kubeconfig="+kubeconfig, "-f", workloadYaml).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Check replicas will scale down to minimum value")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "0 0 0 0", ok, 5*DefaultTimeout, []string{"--kubeconfig=" + kubeconfig, "MachineSet", "-n", "openshift-machine-api", "-l", "hive.openshift.io/machine-pool=infra2", "-o=jsonpath={.items[*].status.replicas}"}).check(oc)
}) | |||||
test case | openshift/openshift-tests-private | d186ec19-a3c5-4c75-b219-f445af3b52c7 | NonHyperShiftHOST-NonPreRelease-Longduration-ConnectedOnly-Author:lwan-Medium-46729-[HIVE]Support overriding installer image [Serial] | ['"fmt"', '"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_gcp.go | g.It("NonHyperShiftHOST-NonPreRelease-Longduration-ConnectedOnly-Author:lwan-Medium-46729-[HIVE]Support overriding installer image [Serial]", func() {
testCaseID := "46729"
cdName := "cluster-" + testCaseID + "-" + getRandomString()[:ClusterSuffixLen]
imageSetName := cdName + "-imageset"
imageSetTemp := filepath.Join(testDataDir, "clusterimageset.yaml")
imageSet := clusterImageSet{
name: imageSetName,
releaseImage: testOCPImage,
template: imageSetTemp,
}
exutil.By("Create ClusterImageSet...")
defer cleanupObjects(oc, objectTableRef{"ClusterImageSet", "", imageSetName})
imageSet.create(oc)
oc.SetupProject()
//secrets can be accessed by pods in the same namespace, so copy the pull-secret and gcp-credentials to the target namespace for the clusterdeployment
exutil.By("Copy GCP platform credentials...")
createGCPCreds(oc, oc.Namespace())
exutil.By("Copy pull-secret...")
createPullSecret(oc, oc.Namespace())
exutil.By("Create GCP Install-Config Secret...")
installConfigTemp := filepath.Join(testDataDir, "gcp-install-config.yaml")
installConfigSecretName := cdName + "-install-config"
projectID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure/cluster", "-o=jsonpath={.status.platformStatus.gcp.projectID}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(projectID).NotTo(o.BeEmpty())
installConfigSecret := gcpInstallConfig{
name1: installConfigSecretName,
namespace: oc.Namespace(),
baseDomain: GCPBaseDomain,
name2: cdName,
region: GCPRegion,
projectid: projectID,
template: installConfigTemp,
}
defer cleanupObjects(oc, objectTableRef{"secret", oc.Namespace(), installConfigSecretName})
installConfigSecret.create(oc)
exutil.By("Create GCP ClusterDeployment...")
clusterTemp := filepath.Join(testDataDir, "clusterdeployment-gcp.yaml")
clusterVersion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion/version", "-o=jsonpath={.status.desired.version}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(clusterVersion).NotTo(o.BeEmpty())
installerImageForOverride, err := getPullSpec(oc, "installer", clusterVersion)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(installerImageForOverride).NotTo(o.BeEmpty())
e2e.Logf("ClusterVersion is %s, installerImageForOverride is %s", clusterVersion, installerImageForOverride)
cluster := gcpClusterDeployment{
fake: "false",
name: cdName,
namespace: oc.Namespace(),
baseDomain: GCPBaseDomain,
clusterName: cdName,
platformType: "gcp",
credRef: GCPCreds,
region: GCPRegion,
imageSetRef: imageSetName,
installConfigSecret: installConfigSecretName,
pullSecretRef: PullSecret,
installerImageOverride: installerImageForOverride,
installAttemptsLimit: 3,
template: clusterTemp,
}
defer cleanupObjects(oc, objectTableRef{"ClusterDeployment", oc.Namespace(), cdName})
cluster.create(oc)
exutil.By("Check installer image is overrided via \"installerImageOverride\" field")
e2e.Logf("Check cd .status.installerImage")
newCheck("expect", "get", asAdmin, withoutNamespace, compare, installerImageForOverride, ok, 2*DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.status.installerImage}"}).check(oc)
e2e.Logf("Check Installer commitID in provision pod log matches commitID from overrided Installer image")
commitID, err := getCommitID(oc, "\" installer \"", clusterVersion)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(commitID).NotTo(o.BeEmpty())
e2e.Logf("Installer commitID is %s", commitID)
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "", nok, DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.status.provisionRef.name}"}).check(oc)
provisionName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.status.provisionRef.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
newCheck("expect", "logs", asAdmin, withoutNamespace, contain, commitID, ok, DefaultTimeout, []string{"-n", oc.Namespace(), fmt.Sprintf("jobs/%s-provision", provisionName), "-c", "hive"}).check(oc)
}) | |||||
test case | openshift/openshift-tests-private | e820b564-b1d4-472a-80ea-f9e1863d252e | NonHyperShiftHOST-NonPreRelease-Longduration-ConnectedOnly-Author:lwan-Medium-45279-Test Metric for ClusterClaim[Serial] | ['"path/filepath"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_gcp.go | g.It("NonHyperShiftHOST-NonPreRelease-Longduration-ConnectedOnly-Author:lwan-Medium-45279-Test Metric for ClusterClaim[Serial]", func() {
// Expose Hive metrics, and neutralize the effect after finishing the test case
needRecover, prevConfig := false, ""
defer recoverClusterMonitoring(oc, &needRecover, &prevConfig)
exposeMetrics(oc, testDataDir, &needRecover, &prevConfig)
testCaseID := "45279"
poolName := "pool-" + testCaseID
imageSetName := poolName + "-imageset"
imageSetTemp := filepath.Join(testDataDir, "clusterimageset.yaml")
imageSet := clusterImageSet{
name: imageSetName,
releaseImage: testOCPImage,
template: imageSetTemp,
}
exutil.By("Create ClusterImageSet...")
defer cleanupObjects(oc, objectTableRef{"ClusterImageSet", "", imageSetName})
imageSet.create(oc)
exutil.By("Check if ClusterImageSet was created successfully")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, imageSetName, ok, DefaultTimeout, []string{"ClusterImageSet"}).check(oc)
oc.SetupProject()
//secrets can be accessed by pods in the same namespace, so copy the pull-secret and gcp-credentials to the target namespace for the pool
exutil.By("Copy GCP platform credentials...")
createGCPCreds(oc, oc.Namespace())
exutil.By("Copy pull-secret...")
createPullSecret(oc, oc.Namespace())
exutil.By("Create ClusterPool...")
poolTemp := filepath.Join(testDataDir, "clusterpool-gcp.yaml")
pool := gcpClusterPool{
name: poolName,
namespace: oc.Namespace(),
fake: "true",
baseDomain: GCPBaseDomain,
imageSetRef: imageSetName,
platformType: "gcp",
credRef: GCPCreds,
region: GCPRegion,
pullSecretRef: PullSecret,
size: 2,
maxSize: 2,
runningCount: 2,
maxConcurrent: 2,
hibernateAfter: "360m",
template: poolTemp,
}
defer cleanupObjects(oc, objectTableRef{"ClusterPool", oc.Namespace(), poolName})
pool.create(oc)
exutil.By("Check if GCP ClusterPool created successfully and become ready")
//runningCount is 2 so pool status should be standby: 0, ready: 2
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "2", ok, DefaultTimeout, []string{"ClusterPool", poolName, "-n", oc.Namespace(), "-o=jsonpath={.status.ready}"}).check(oc)
exutil.By("Check if CD is Running")
cdListStr := getCDlistfromPool(oc, poolName)
var cdArray []string
cdArray = strings.Split(strings.TrimSpace(cdListStr), "\n")
for i := range cdArray {
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "Running", ok, DefaultTimeout, []string{"ClusterDeployment", cdArray[i], "-n", cdArray[i]}).check(oc)
}
exutil.By("Create ClusterClaim...")
claimTemp := filepath.Join(testDataDir, "clusterclaim.yaml")
claimName1 := poolName + "-claim-1"
claim1 := clusterClaim{
name: claimName1,
namespace: oc.Namespace(),
clusterPoolName: poolName,
template: claimTemp,
}
defer cleanupObjects(oc, objectTableRef{"ClusterClaim", oc.Namespace(), claimName1})
claim1.create(oc)
e2e.Logf("Check if ClusterClaim %s created successfully", claimName1)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, claimName1, ok, DefaultTimeout, []string{"ClusterClaim", "-n", oc.Namespace(), "-o=jsonpath={.items[*].metadata.name}"}).check(oc)
exutil.By("Check Metrics for ClusterClaim...")
token, err := exutil.GetSAToken(oc)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(token).NotTo(o.BeEmpty())
query1 := "hive_clusterclaim_assignment_delay_seconds_sum"
query2 := "hive_clusterclaim_assignment_delay_seconds_count"
query3 := "hive_clusterclaim_assignment_delay_seconds_bucket"
query := []string{query1, query2, query3}
exutil.By("Check hive metrics for clusterclaim exist")
checkMetricExist(oc, ok, token, thanosQuerierURL, query)
e2e.Logf("Check metric %s Value is 1", query2)
checkResourcesMetricValue(oc, poolName, oc.Namespace(), "1", token, thanosQuerierURL, query2)
exutil.By("Create another ClusterClaim...")
claimName2 := poolName + "-claim-2"
claim2 := clusterClaim{
name: claimName2,
namespace: oc.Namespace(),
clusterPoolName: poolName,
template: claimTemp,
}
defer cleanupObjects(oc, objectTableRef{"ClusterClaim", oc.Namespace(), claimName2})
claim2.create(oc)
e2e.Logf("Check if ClusterClaim %s created successfully", claimName2)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, claimName2, ok, DefaultTimeout, []string{"ClusterClaim", "-n", oc.Namespace(), "-o=jsonpath={.items[*].metadata.name}"}).check(oc)
e2e.Logf("Check metric %s Value change to 2", query2)
checkResourcesMetricValue(oc, poolName, oc.Namespace(), "2", token, thanosQuerierURL, query2)
}) | |||||
test case | openshift/openshift-tests-private | 81661720-25f5-49b0-8ea7-909fb32681e7 | NonHyperShiftHOST-NonPreRelease-Longduration-ConnectedOnly-Author:mihuang-Medium-54463-Add cluster install success/fail metrics[Serial] | ['"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_gcp.go | g.It("NonHyperShiftHOST-NonPreRelease-Longduration-ConnectedOnly-Author:mihuang-Medium-54463-Add cluster install success/fail metrics[Serial]", func() {
// Expose Hive metrics, and neutralize the effect after finishing the test case
needRecover, prevConfig := false, ""
defer recoverClusterMonitoring(oc, &needRecover, &prevConfig)
exposeMetrics(oc, testDataDir, &needRecover, &prevConfig)
testCaseID := "54463"
cdName := "cluster-" + testCaseID + "-" + getRandomString()[:ClusterSuffixLen]
imageSetName := cdName + "-imageset"
imageSetTemp := filepath.Join(testDataDir, "clusterimageset.yaml")
imageSet := clusterImageSet{
name: imageSetName,
releaseImage: testOCPImage,
template: imageSetTemp,
}
exutil.By("Create ClusterImageSet...")
defer cleanupObjects(oc, objectTableRef{"ClusterImageSet", "", imageSetName})
imageSet.create(oc)
oc.SetupProject()
//secrets can be accessed by pods in the same namespace, so copy the pull-secret and gcp-credentials to the target namespace for the clusterdeployment
exutil.By("Don't copy GCP platform credentials make install fail...")
//createGCPCreds(oc, oc.Namespace())
exutil.By("Copy pull-secret...")
createPullSecret(oc, oc.Namespace())
exutil.By("Create GCP Install-Config Secret...")
installConfigTemp := filepath.Join(testDataDir, "gcp-install-config.yaml")
installConfigSecretName := cdName + "-install-config"
projectID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure/cluster", "-o=jsonpath={.status.platformStatus.gcp.projectID}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(projectID).NotTo(o.BeEmpty())
installConfigSecret := gcpInstallConfig{
name1: installConfigSecretName,
namespace: oc.Namespace(),
baseDomain: GCPBaseDomain,
name2: cdName,
region: GCPRegion,
projectid: projectID,
template: installConfigTemp,
}
defer cleanupObjects(oc, objectTableRef{"secret", oc.Namespace(), installConfigSecretName})
installConfigSecret.create(oc)
exutil.By("Get SA token to check Metrics...")
token, err := exutil.GetSAToken(oc)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(token).NotTo(o.BeEmpty())
var installAttemptsLimit = []int{3, 1}
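// First pass: installAttemptsLimit=3 with no GCP credentials, so every provision attempt fails;
// second pass: installAttemptsLimit=1 with credentials copied in, so the (fake) install succeeds.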
for i := 0; i < len(installAttemptsLimit); i++ {
func() {
if installAttemptsLimit[i] == 3 {
exutil.By("Config GCP ClusterDeployment with installAttemptsLimit=3 and make install fail..")
} else {
exutil.By("Config GCP ClusterDeployment with installAttemptsLimit=1 and make install success..")
exutil.By("Copy GCP platform credentials make install success...")
createGCPCreds(oc, oc.Namespace())
}
cluster := gcpClusterDeployment{
fake: "true",
name: cdName,
namespace: oc.Namespace(),
baseDomain: GCPBaseDomain,
clusterName: cdName,
platformType: "gcp",
credRef: GCPCreds,
region: GCPRegion,
imageSetRef: cdName + "-imageset",
installConfigSecret: cdName + "-install-config",
pullSecretRef: PullSecret,
installAttemptsLimit: installAttemptsLimit[i],
template: filepath.Join(testDataDir, "clusterdeployment-gcp.yaml"),
}
defer cleanupObjects(oc, objectTableRef{"ClusterDeployment", oc.Namespace(), cdName})
cluster.create(oc)
if installAttemptsLimit[i] == 3 {
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "InstallAttemptsLimitReached", ok, 5*DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.status.conditions[?(@.type==\"ProvisionStopped\")].reason}"}).check(oc)
o.Expect(checkResourceNumber(oc, cdName, []string{"pods", "-A"})).To(o.Equal(3))
queryFailSum := "hive_cluster_deployment_install_failure_total_sum"
queryFailCount := "hive_cluster_deployment_install_failure_total_count"
queryFailBucket := "hive_cluster_deployment_install_failure_total_bucket"
queryFail := []string{queryFailSum, queryFailCount, queryFailBucket}
exutil.By("Check hive metrics for cd install fail")
checkMetricExist(oc, ok, token, thanosQuerierURL, queryFail)
e2e.Logf("Check metric %s with install_attempt = 2", queryFailCount)
checkResourcesMetricValue(oc, GCPRegion, HiveNamespace, "2", token, thanosQuerierURL, queryFailCount)
e2e.Logf("delete cd and create a success case")
} else {
exutil.By("Check GCP ClusterDeployment installed flag is true")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "true", ok, ClusterInstallTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.spec.installed}"}).check(oc)
querySuccSum := "hive_cluster_deployment_install_success_total_sum"
querySuccCount := "hive_cluster_deployment_install_success_total_count"
querySuccBucket := "hive_cluster_deployment_install_success_total_bucket"
querySuccess := []string{querySuccSum, querySuccCount, querySuccBucket}
exutil.By("Check hive metrics for cd installed successfully")
checkMetricExist(oc, ok, token, thanosQuerierURL, querySuccess)
e2e.Logf("Check metric %s with with install_attempt = 0", querySuccCount)
checkResourcesMetricValue(oc, GCPRegion, HiveNamespace, "0", token, thanosQuerierURL, querySuccCount)
}
}()
}
}) | |||||
test case | openshift/openshift-tests-private | f7e27481-f5fd-4bc6-b606-f6721580f60c | NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:jshu-High-68294-GCP Shared VPC support for MachinePool[Serial] | ['"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_gcp.go | g.It("NonHyperShiftHOST-Longduration-NonPreRelease-ConnectedOnly-Author:jshu-High-68294-GCP Shared VPC support for MachinePool[Serial]", func() {
testCaseID := "68294"
cdName := "cluster-" + testCaseID + "-" + getRandomString()[:ClusterSuffixLen]
//oc.SetupProject()
exutil.By("Config GCP Install-Config Secret...")
projectID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure/cluster", "-o=jsonpath={.status.platformStatus.gcp.projectID}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(projectID).NotTo(o.BeEmpty())
installConfigSecret := gcpInstallConfig{
name1: cdName + "-install-config",
namespace: oc.Namespace(),
baseDomain: GCPBaseDomain,
name2: cdName,
region: GCPRegion,
projectid: projectID,
computeSubnet: "installer-shared-vpc-subnet-2",
controlPlaneSubnet: "installer-shared-vpc-subnet-1",
network: "installer-shared-vpc",
networkProjectId: "openshift-qe-shared-vpc",
template: filepath.Join(testDataDir, "gcp-install-config-sharedvpc.yaml"),
}
exutil.By("Config GCP ClusterDeployment...")
cluster := gcpClusterDeployment{
fake: "false",
name: cdName,
namespace: oc.Namespace(),
baseDomain: GCPBaseDomain,
clusterName: cdName,
platformType: "gcp",
credRef: GCPCreds,
region: GCPRegion,
imageSetRef: cdName + "-imageset",
installConfigSecret: cdName + "-install-config",
pullSecretRef: PullSecret,
installAttemptsLimit: 3,
template: filepath.Join(testDataDir, "clusterdeployment-gcp.yaml"),
}
defer cleanCD(oc, cluster.name+"-imageset", oc.Namespace(), installConfigSecret.name1, cluster.name)
createCD(testDataDir, testOCPImage, oc, oc.Namespace(), installConfigSecret, cluster)
exutil.By("Create the infra MachinePool with the shared vpc...")
inframachinepoolGCPTemp := filepath.Join(testDataDir, "machinepool-infra-gcp-sharedvpc.yaml")
inframp := machinepool{
namespace: oc.Namespace(),
clusterName: cdName,
template: inframachinepoolGCPTemp,
}
defer cleanupObjects(oc,
objectTableRef{"MachinePool", oc.Namespace(), cdName + "-infra"},
)
inframp.create(oc)
exutil.By("Check GCP ClusterDeployment installed flag is true")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "true", ok, ClusterInstallTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(), "-o=jsonpath={.spec.installed}"}).check(oc)
exutil.By("Check the infra MachinePool .status.replicas = 1")
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "1", ok, DefaultTimeout, []string{"MachinePool", cdName + "-infra", "-n", oc.Namespace(), "-o=jsonpath={.status.replicas}"}).check(oc)
}) | |||||
test | openshift/openshift-tests-private | 72f02d74-40a5-4714-acac-bec37a4c19cc | hive_util | import (
"bufio"
"context"
"crypto"
"encoding/json"
"fmt"
"math/rand"
"net"
"os"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/service/route53"
"github.com/aws/aws-sdk-go-v2/service/route53/types"
"github.com/3th1nk/cidr"
"gopkg.in/yaml.v3"
legoroute53 "github.com/go-acme/lego/v4/providers/dns/route53"
"github.com/go-acme/lego/v4/registration"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | package hive
import (
"bufio"
"context"
"crypto"
"encoding/json"
"fmt"
"math/rand"
"net"
"os"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/service/route53"
"github.com/aws/aws-sdk-go-v2/service/route53/types"
"github.com/3th1nk/cidr"
"gopkg.in/yaml.v3"
legoroute53 "github.com/go-acme/lego/v4/providers/dns/route53"
"github.com/go-acme/lego/v4/registration"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
type clusterMonitoringConfig struct {
enableUserWorkload bool
namespace string
template string
}
type hiveNameSpace struct {
name string
template string
}
type operatorGroup struct {
name string
namespace string
template string
}
type subscription struct {
name string
namespace string
channel string
approval string
operatorName string
sourceName string
sourceNamespace string
startingCSV string
currentCSV string
installedCSV string
template string
}
type hiveconfig struct {
logLevel string
targetNamespace string
template string
}
type clusterImageSet struct {
name string
releaseImage string
template string
}
type clusterPool struct {
name string
namespace string
fake string
baseDomain string
imageSetRef string
platformType string
credRef string
region string
pullSecretRef string
size int
maxSize int
runningCount int
maxConcurrent int
hibernateAfter string
template string
}
type clusterClaim struct {
name string
namespace string
clusterPoolName string
template string
}
type installConfig struct {
name1 string
namespace string
baseDomain string
name2 string
region string
template string
publish string
vmType string
arch string
credentialsMode string
internalJoinSubnet string
}
type installConfigPrivateLink struct {
name1 string
namespace string
baseDomain string
name2 string
region string
template string
publish string
vmType string
arch string
credentialsMode string
internalJoinSubnet string
privateSubnetId1 string
privateSubnetId2 string
privateSubnetId3 string
machineNetworkCidr string
}
type clusterDeployment struct {
fake string
installerType string
name string
namespace string
baseDomain string
clusterName string
manageDNS bool
platformType string
credRef string
region string
imageSetRef string
installConfigSecret string
pullSecretRef string
installAttemptsLimit int
customizedTag string
template string
}
type clusterDeploymentAdopt struct {
name string
namespace string
baseDomain string
adminKubeconfigRef string
clusterID string
infraID string
clusterName string
manageDNS bool
platformType string
credRef string
region string
pullSecretRef string
preserveOnDelete bool
template string
}
type clusterDeploymentAssumeRole struct {
fake string
installerType string
name string
namespace string
baseDomain string
boundServiceAccountSigningKeySecretRef string
roleARN string
externalID string
clusterName string
manageDNS bool
platformType string
region string
manifestsSecretRef string
imageSetRef string
installConfigSecret string
pullSecretRef string
installAttemptsLimit int
template string
}
type clusterDeploymentPrivateLink struct {
fake string
name string
namespace string
baseDomain string
clusterName string
manageDNS bool
credRef string
region string
imageSetRef string
installConfigSecret string
pullSecretRef string
installAttemptsLimit int
template string
}
type machinepool struct {
clusterName string
namespace string
iops int
template string
authentication string
gcpSecureBoot string
networkProjectID string
customizedTag string
}
type syncSetResource struct {
name string
namespace string
namespace2 string
cdrefname string
ramode string
applybehavior string
cmname string
cmnamespace string
template string
}
type syncSetPatch struct {
name string
namespace string
cdrefname string
cmname string
cmnamespace string
pcontent string
patchType string
template string
}
type syncSetSecret struct {
name string
namespace string
cdrefname string
sname string
snamespace string
tname string
tnamespace string
template string
}
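// objectTableRef identifies a resource (kind/namespace/name) handed to the cleanup helpers;
// an empty namespace is used for cluster-scoped objects such as ClusterImageSet.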
type objectTableRef struct {
kind string
namespace string
name string
}
// Azure
type azureInstallConfig struct {
name1 string
namespace string
baseDomain string
name2 string
resGroup string
azureType string
region string
template string
}
type azureClusterDeployment struct {
fake string
copyCliDomain string
name string
namespace string
baseDomain string
clusterName string
platformType string
credRef string
region string
resGroup string
azureType string
imageSetRef string
installConfigSecret string
installerImageOverride string
pullSecretRef string
template string
}
type azureClusterPool struct {
name string
namespace string
fake string
baseDomain string
imageSetRef string
platformType string
credRef string
region string
resGroup string
pullSecretRef string
size int
maxSize int
runningCount int
maxConcurrent int
hibernateAfter string
template string
}
// GCP
type gcpInstallConfig struct {
name1 string
namespace string
baseDomain string
name2 string
region string
projectid string
template string
secureBoot string
computeSubnet string
controlPlaneSubnet string
network string
networkProjectId string
}
type gcpClusterDeployment struct {
fake string
name string
namespace string
baseDomain string
clusterName string
platformType string
credRef string
region string
imageSetRef string
installConfigSecret string
pullSecretRef string
installerImageOverride string
installAttemptsLimit int
template string
}
type gcpClusterPool struct {
name string
namespace string
fake string
baseDomain string
imageSetRef string
platformType string
credRef string
region string
pullSecretRef string
size int
maxSize int
runningCount int
maxConcurrent int
hibernateAfter string
template string
}
// vSphere
type vSphereInstallConfig struct {
secretName string
secretNs string
baseDomain string
icName string
machineNetwork string
apiVip string
cluster string
datacenter string
datastore string
ingressVip string
network string
password string
username string
vCenter string
template string
}
type vSphereClusterDeployment struct {
fake bool
name string
namespace string
baseDomain string
manageDns bool
clusterName string
certRef string
cluster string
credRef string
datacenter string
datastore string
network string
vCenter string
imageSetRef string
installConfigSecret string
pullSecretRef string
installAttemptsLimit int
template string
}
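// prometheusQueryResult mirrors the JSON payload returned by the Prometheus/Thanos
// /api/v1/query endpoint, keeping only the metric labels these tests assert on.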
type prometheusQueryResult struct {
Data struct {
Result []struct {
Metric struct {
Name string `json:"__name__"`
ClusterpoolName string `json:"clusterpool_name"`
ClusterpoolNamespace string `json:"clusterpool_namespace"`
ClusterDeployment string `json:"cluster_deployment"`
ExportedNamespace string `json:"exported_namespace"`
ClusterType string `json:"cluster_type"`
ClusterVersion string `json:"cluster_version"`
InstallAttempt string `json:"install_attempt"`
Platform string `json:"platform"`
Region string `json:"region"`
Prometheus string `json:"prometheus"`
Condition string `json:"condition"`
Reason string `json:"reason"`
Endpoint string `json:"endpoint"`
Instance string `json:"instance"`
Job string `json:"job"`
Namespace string `json:"namespace"`
Pod string `json:"pod"`
Workers string `json:"workers"`
Service string `json:"service"`
} `json:"metric"`
Value []interface{} `json:"value"`
} `json:"result"`
ResultType string `json:"resultType"`
} `json:"data"`
Status string `json:"status"`
}
// This type is defined to avoid requiring openshift/installer types
// which bring in quite a few dependencies, making this repo unnecessarily
// difficult to maintain.
type minimalInstallConfig struct {
Networking struct {
MachineNetwork []struct {
CIDR string `yaml:"cidr"`
} `yaml:"machineNetwork"`
} `yaml:"networking"`
}
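// Illustrative sketch only (the variable names below are assumptions, not part of this file):
// decoding the machine network CIDR with the yaml.v3 package imported above would look like
//
//	var ic minimalInstallConfig
//	if err := yaml.Unmarshal(installConfigBytes, &ic); err == nil && len(ic.Networking.MachineNetwork) > 0 {
//		machineCIDR := ic.Networking.MachineNetwork[0].CIDR
//		_ = machineCIDR
//	}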
type legoUser struct {
Email string
Registration *registration.Resource
key crypto.PrivateKey
}
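// testEnv identifies where the suite is running; see the testEnvLocal/testEnvJenkins/testEnvCI
// constants below.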
type testEnv string
// General configurations
const (
pemX509CertPattern = "-----BEGIN CERTIFICATE-----\\n([A-Za-z0-9+/=\\n]+)\\n-----END CERTIFICATE-----"
)
// Installer Configurations
const (
PublishExternal = "External"
PublishInternal = "Internal"
AWSVmTypeARM64 = "m6g.xlarge"
AWSVmTypeAMD64 = "m6i.xlarge"
archARM64 = "arm64"
archAMD64 = "amd64"
defaultAWSInternalJoinSubnet = "100.64.0.0/16"
defaultAWSMachineNetworkCidr = "10.0.0.0/16"
)
// Monitoring configurations
const (
PrometheusURL = "https://prometheus-k8s.openshift-monitoring.svc:9091/api/v1/query?query="
thanosQuerierURL = "https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query?query="
)
// Hive Configurations
const (
HiveNamespace = "hive" //Hive Namespace
PullSecret = "pull-secret"
hiveAdditionalCASecret = "hive-additional-ca"
HiveImgRepoOnQuay = "app-sre"
ClusterInstallTimeout = 3600
DefaultTimeout = 120
WaitingForClusterOperatorsTimeout = 600
FakeClusterInstallTimeout = 600
ClusterResumeTimeout = 1200
ClusterUninstallTimeout = 1800
HibernateAfterTimer = 300
ClusterSuffixLen = 4
LogsLimitLen = 1024
HiveManagedDNS = "hivemanageddns" //for all manage DNS Domain
)
// Test environments
const (
testEnvLocal testEnv = "Local"
testEnvJenkins testEnv = "Jenkins"
testEnvCI testEnv = "CI"
)
// AWS Configurations
const (
AWSBaseDomain = "qe.devcluster.openshift.com" //AWS BaseDomain
AWSRegion = "us-east-2"
AWSRegion2 = "us-east-1"
AWSCreds = "aws-creds"
AWSCredsPattern = `\[default\]
aws_access_key_id = ([a-zA-Z0-9+/]+)
aws_secret_access_key = ([a-zA-Z0-9+/]+)`
AWSDefaultCDTag = "hive-qe-cd-tag" //Default customized userTag defined ClusterDeployment's spec
AWSDefaultMPTag = "hive-qe-mp-tag" //Default customized userTag defined MachinePool's spec
)
// Azure Configurations
const (
AzureClusterInstallTimeout = 4500
AzureBaseDomain = "qe.azure.devcluster.openshift.com" //Azure BaseDomain
AzureRegion = "centralus"
AzureRegion2 = "eastus"
AzureCreds = "azure-credentials"
AzureRESGroup = "os4-common"
AzurePublic = "AzurePublicCloud"
AzureGov = "AzureUSGovernmentCloud"
)
// GCP Configurations
const (
GCPBaseDomain = "qe.gcp.devcluster.openshift.com" //GCP BaseDomain
GCPBaseDomain2 = "qe1.gcp.devcluster.openshift.com"
GCPRegion = "us-central1"
GCPRegion2 = "us-east1"
GCPCreds = "gcp-credentials"
)
// VSphere configurations
const (
VSphereCreds = "vsphere-creds"
VSphereCerts = "vsphere-certs"
VSphereAWSCredsFilePathCI = "/var/run/vault/aws/.awscred"
VSphereNetworkPattern = "[a-zA-Z]+-[a-zA-Z]+-([\\d]+)"
VSphereLastCidrOctetMin = 3
VSphereLastCidrOctetMax = 49
)
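// applyResourceFromTemplate renders the given template parameters with `oc process`, writes the
// result to a temporary JSON file and applies it with `oc apply`.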
func applyResourceFromTemplate(oc *exutil.CLI, parameters ...string) error {
var cfgFileJSON string
err := wait.Poll(3*time.Second, 15*time.Second, func() (bool, error) {
output, err := oc.AsAdmin().Run("process").Args(parameters...).OutputToFile(getRandomString() + "-hive-resource-cfg.json")
if err != nil {
e2e.Logf("the err:%v, and try next round", err)
return false, nil
}
cfgFileJSON = output
return true, nil
})
exutil.AssertWaitPollNoErr(err, "fail to create config file")
e2e.Logf("the file of resource is %s", cfgFileJSON)
defer os.Remove(cfgFileJSON)
return oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", cfgFileJSON).Execute()
}
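// getRandomString returns an 8-character lowercase alphanumeric string used to make resource names unique.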
func getRandomString() string {
chars := "abcdefghijklmnopqrstuvwxyz0123456789"
seed := rand.New(rand.NewSource(time.Now().UnixNano()))
buffer := make([]byte, 8)
for index := range buffer {
buffer[index] = chars[seed.Intn(len(chars))]
}
return string(buffer)
}
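// The following methods make legoUser satisfy the go-acme/lego registration.User interface,
// which is required for ACME account registration.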
func (u *legoUser) GetEmail() string {
return u.Email
}
func (u *legoUser) GetRegistration() *registration.Resource {
return u.Registration
}
func (u *legoUser) GetPrivateKey() crypto.PrivateKey {
return u.key
}
func (cmc *clusterMonitoringConfig) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", cmc.template, "-p", "ENABLEUSERWORKLOAD="+strconv.FormatBool(cmc.enableUserWorkload), "NAMESPACE="+cmc.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}
// Create the hive namespace if it does not exist
func (ns *hiveNameSpace) createIfNotExist(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", ns.template, "-p", "NAME="+ns.name)
o.Expect(err).NotTo(o.HaveOccurred())
}
// Create the operatorGroup for Hive if it does not exist
func (og *operatorGroup) createIfNotExist(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", og.template, "-p", "NAME="+og.name, "NAMESPACE="+og.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (sub *subscription) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", sub.template, "-p", "NAME="+sub.name, "NAMESPACE="+sub.namespace, "CHANNEL="+sub.channel,
"APPROVAL="+sub.approval, "OPERATORNAME="+sub.operatorName, "SOURCENAME="+sub.sourceName, "SOURCENAMESPACE="+sub.sourceNamespace, "STARTINGCSV="+sub.startingCSV)
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Compare(sub.approval, "Automatic") == 0 {
sub.findInstalledCSV(oc)
} else {
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "UpgradePending", ok, DefaultTimeout, []string{"sub", sub.name, "-n", sub.namespace, "-o=jsonpath={.status.state}"}).check(oc)
}
}
// Create the subscription for Hive if it does not exist and wait for the resource to be ready
func (sub *subscription) createIfNotExist(oc *exutil.CLI) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", "-n", sub.namespace).Output()
if strings.Contains(output, "NotFound") || strings.Contains(output, "No resources") || err != nil {
e2e.Logf("No hive subscription, Create it.")
err = applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", sub.template, "-p", "NAME="+sub.name, "NAMESPACE="+sub.namespace, "CHANNEL="+sub.channel,
"APPROVAL="+sub.approval, "OPERATORNAME="+sub.operatorName, "SOURCENAME="+sub.sourceName, "SOURCENAMESPACE="+sub.sourceNamespace, "STARTINGCSV="+sub.startingCSV)
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Compare(sub.approval, "Automatic") == 0 {
sub.findInstalledCSV(oc)
} else {
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "UpgradePending", ok, DefaultTimeout, []string{"sub", sub.name, "-n", sub.namespace, "-o=jsonpath={.status.state}"}).check(oc)
}
//wait for pod running
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "Running", ok, DefaultTimeout, []string{"pod", "--selector=control-plane=hive-operator", "-n",
sub.namespace, "-o=jsonpath={.items[0].status.phase}"}).check(oc)
//Check whether the deployed Hive image needs to be replaced with the latest one
hiveDeployedImg, _, err := oc.
AsAdmin().
WithoutNamespace().
Run("get").
Args("csv", sub.installedCSV, "-n", sub.namespace,
"-o", "jsonpath={.spec.install.spec.deployments[0].spec.template.spec.containers[0].image}").
Outputs()
if err != nil {
e2e.Logf("Failed to get Hive image: %v", err)
} else {
e2e.Logf("Found Hive deployed image = %v", hiveDeployedImg)
latestHiveVer := getLatestHiveVersion()
if strings.Contains(hiveDeployedImg, latestHiveVer) {
e2e.Logf("The deployed Hive image is already the lastest.")
} else {
e2e.Logf("The deployed Hive image is NOT the lastest, patched to the latest version: %v", latestHiveVer)
patchYaml := `[{"op": "replace", "path": "/spec/install/spec/deployments/0/spec/template/spec/containers/0/image", "value": "quay.io/app-sre/hive:versiontobepatched"}]`
patchYaml = strings.Replace(patchYaml, "versiontobepatched", latestHiveVer, 1)
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", sub.namespace, "csv", sub.installedCSV, "--type=json", "-p", patchYaml).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
}
} else {
e2e.Logf("hive subscription already exists.")
}
}
func (sub *subscription) findInstalledCSV(oc *exutil.CLI) {
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "AtLatestKnown", ok, DefaultTimeout, []string{"sub", sub.name, "-n", sub.namespace, "-o=jsonpath={.status.state}"}).check(oc)
installedCSV := getResource(oc, asAdmin, withoutNamespace, "sub", sub.name, "-n", sub.namespace, "-o=jsonpath={.status.installedCSV}")
o.Expect(installedCSV).NotTo(o.BeEmpty())
if strings.Compare(sub.installedCSV, installedCSV) != 0 {
sub.installedCSV = installedCSV
}
e2e.Logf("the installed CSV name is %s", sub.installedCSV)
}
func (hc *hiveconfig) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", hc.template, "-p", "LOGLEVEL="+hc.logLevel, "TARGETNAMESPACE="+hc.targetNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
}
// Create HiveConfig if it does not exist and wait for the resource to be ready
func (hc *hiveconfig) createIfNotExist(oc *exutil.CLI) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("HiveConfig", "hive").Output()
if strings.Contains(output, "have a resource type") || err != nil {
e2e.Logf("No hivconfig, Create it.")
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", hc.template, "-p", "LOGLEVEL="+hc.logLevel, "TARGETNAMESPACE="+hc.targetNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
//wait for pods running
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "hive-clustersync", ok, WaitingForClusterOperatorsTimeout, []string{"pod", "--selector=control-plane=clustersync",
"-n", HiveNamespace, "-o=jsonpath={.items[*].metadata.name}"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "Running", ok, DefaultTimeout, []string{"pod", "--selector=control-plane=clustersync", "-n",
HiveNamespace, "-o=jsonpath={.items[0].status.phase}"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "hive-controllers", ok, DefaultTimeout, []string{"pod", "--selector=control-plane=controller-manager",
"-n", HiveNamespace, "-o=jsonpath={.items[*].metadata.name}"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "Running", ok, DefaultTimeout, []string{"pod", "--selector=control-plane=controller-manager", "-n",
HiveNamespace, "-o=jsonpath={.items[0].status.phase}"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "hiveadmission", ok, DefaultTimeout, []string{"pod", "--selector=app=hiveadmission",
"-n", HiveNamespace, "-o=jsonpath={.items[*].metadata.name}"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "Running Running", ok, DefaultTimeout, []string{"pod", "--selector=app=hiveadmission", "-n",
HiveNamespace, "-o=jsonpath={.items[*].status.phase}"}).check(oc)
} else {
e2e.Logf("hivconfig already exists.")
}
}
func (imageset *clusterImageSet) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", imageset.template, "-p", "NAME="+imageset.name, "RELEASEIMAGE="+imageset.releaseImage)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (pool *clusterPool) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", pool.template, "-p", "NAME="+pool.name, "NAMESPACE="+pool.namespace, "FAKE="+pool.fake, "BASEDOMAIN="+pool.baseDomain, "IMAGESETREF="+pool.imageSetRef, "PLATFORMTYPE="+pool.platformType, "CREDREF="+pool.credRef, "REGION="+pool.region, "PULLSECRETREF="+pool.pullSecretRef, "SIZE="+strconv.Itoa(pool.size), "MAXSIZE="+strconv.Itoa(pool.maxSize), "RUNNINGCOUNT="+strconv.Itoa(pool.runningCount), "MAXCONCURRENT="+strconv.Itoa(pool.maxConcurrent), "HIBERNATEAFTER="+pool.hibernateAfter)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (claim *clusterClaim) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", claim.template, "-p", "NAME="+claim.name, "NAMESPACE="+claim.namespace, "CLUSTERPOOLNAME="+claim.clusterPoolName)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (config *installConfig) create(oc *exutil.CLI) {
// Set default values
if config.publish == "" {
config.publish = PublishExternal
}
if config.vmType == "" {
config.vmType = AWSVmTypeAMD64
}
if config.arch == "" {
config.arch = archAMD64
}
parameters := []string{"--ignore-unknown-parameters=true", "-f", config.template, "-p", "NAME1=" + config.name1, "NAMESPACE=" + config.namespace, "BASEDOMAIN=" + config.baseDomain, "NAME2=" + config.name2, "REGION=" + config.region, "PUBLISH=" + config.publish, "VMTYPE=" + config.vmType, "ARCH=" + config.arch}
if len(config.credentialsMode) > 0 {
parameters = append(parameters, "CREDENTIALSMODE="+config.credentialsMode)
}
if len(config.internalJoinSubnet) == 0 {
parameters = append(parameters, "INTERNALJOINSUBNET="+defaultAWSInternalJoinSubnet)
} else {
parameters = append(parameters, "INTERNALJOINSUBNET="+config.internalJoinSubnet)
}
err := applyResourceFromTemplate(oc, parameters...)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (config *installConfigPrivateLink) create(oc *exutil.CLI) {
// Set default values
if config.publish == "" {
config.publish = PublishExternal
}
if config.vmType == "" {
config.vmType = AWSVmTypeAMD64
}
if config.arch == "" {
config.arch = archAMD64
}
parameters := []string{"--ignore-unknown-parameters=true", "-f", config.template, "-p", "NAME1=" + config.name1, "NAMESPACE=" + config.namespace, "BASEDOMAIN=" + config.baseDomain, "NAME2=" + config.name2, "REGION=" + config.region, "PUBLISH=" + config.publish, "VMTYPE=" + config.vmType, "ARCH=" + config.arch}
if len(config.credentialsMode) > 0 {
parameters = append(parameters, "CREDENTIALSMODE="+config.credentialsMode)
}
if len(config.internalJoinSubnet) == 0 {
parameters = append(parameters, "INTERNALJOINSUBNET="+defaultAWSInternalJoinSubnet)
} else {
parameters = append(parameters, "INTERNALJOINSUBNET="+config.internalJoinSubnet)
}
if len(config.privateSubnetId1) > 0 {
parameters = append(parameters, "PRIVATESUBNETID1="+config.privateSubnetId1)
}
if len(config.privateSubnetId2) > 0 {
parameters = append(parameters, "PRIVATESUBNETID2="+config.privateSubnetId2)
}
if len(config.privateSubnetId3) > 0 {
parameters = append(parameters, "PRIVATESUBNETID3="+config.privateSubnetId3)
}
if len(config.machineNetworkCidr) == 0 {
parameters = append(parameters, "MACHINENETWORKCIDR="+defaultAWSMachineNetworkCidr)
} else {
parameters = append(parameters, "MACHINENETWORKCIDR="+config.machineNetworkCidr)
}
err := applyResourceFromTemplate(oc, parameters...)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (cluster *clusterDeployment) create(oc *exutil.CLI) {
parameters := []string{"--ignore-unknown-parameters=true", "-f", cluster.template, "-p", "FAKE=" + cluster.fake, "NAME=" + cluster.name, "NAMESPACE=" + cluster.namespace, "BASEDOMAIN=" + cluster.baseDomain, "CLUSTERNAME=" + cluster.clusterName, "MANAGEDNS=" + strconv.FormatBool(cluster.manageDNS), "PLATFORMTYPE=" + cluster.platformType, "CREDREF=" + cluster.credRef, "REGION=" + cluster.region, "IMAGESETREF=" + cluster.imageSetRef, "INSTALLCONFIGSECRET=" + cluster.installConfigSecret, "PULLSECRETREF=" + cluster.pullSecretRef, "INSTALLATTEMPTSLIMIT=" + strconv.Itoa(cluster.installAttemptsLimit)}
if len(cluster.installerType) > 0 {
parameters = append(parameters, "INSTALLERTYPE="+cluster.installerType)
} else {
parameters = append(parameters, "INSTALLERTYPE=installer")
}
if len(cluster.customizedTag) > 0 {
parameters = append(parameters, "CUSTOMIZEDTAG="+cluster.customizedTag)
} else {
parameters = append(parameters, "CUSTOMIZEDTAG="+AWSDefaultCDTag)
}
err := applyResourceFromTemplate(oc, parameters...)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (cluster *clusterDeploymentAssumeRole) create(oc *exutil.CLI) {
parameters := []string{"--ignore-unknown-parameters=true", "-f", cluster.template, "-p", "FAKE=" + cluster.fake, "INSTALLERTYPE=" + cluster.installerType, "NAME=" + cluster.name, "NAMESPACE=" + cluster.namespace, "BASEDOMAIN=" + cluster.baseDomain, "BOUND_SERVICE_ACCOUNT_SIGNING_KEY_SECRET_REF=" + cluster.boundServiceAccountSigningKeySecretRef, "ROLEARN=" + cluster.roleARN, "EXTERNALID=" + cluster.externalID, "CLUSTERNAME=" + cluster.clusterName, "MANAGEDNS=" + strconv.FormatBool(cluster.manageDNS), "PLATFORMTYPE=" + cluster.platformType, "REGION=" + cluster.region, "MANIFESTS_SECRET_REF=" + cluster.manifestsSecretRef, "IMAGESETREF=" + cluster.imageSetRef, "INSTALLCONFIGSECRET=" + cluster.installConfigSecret, "PULLSECRETREF=" + cluster.pullSecretRef, "INSTALLATTEMPTSLIMIT=" + strconv.Itoa(cluster.installAttemptsLimit)}
err := applyResourceFromTemplate(oc, parameters...)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (cluster *clusterDeploymentAdopt) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", cluster.template, "-p", "NAME="+cluster.name, "NAMESPACE="+cluster.namespace, "BASEDOMAIN="+cluster.baseDomain, "ADMINKUBECONFIGREF="+cluster.adminKubeconfigRef, "CLUSTERID="+cluster.clusterID, "INFRAID="+cluster.infraID, "CLUSTERNAME="+cluster.clusterName, "MANAGEDNS="+strconv.FormatBool(cluster.manageDNS), "PLATFORMTYPE="+cluster.platformType, "CREDREF="+cluster.credRef, "REGION="+cluster.region, "PULLSECRETREF="+cluster.pullSecretRef, "PRESERVEONDELETE="+strconv.FormatBool(cluster.preserveOnDelete))
o.Expect(err).NotTo(o.HaveOccurred())
}
func (cluster *clusterDeploymentPrivateLink) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", cluster.template, "-p", "FAKE="+cluster.fake, "NAME="+cluster.name, "NAMESPACE="+cluster.namespace, "BASEDOMAIN="+cluster.baseDomain, "CLUSTERNAME="+cluster.clusterName, "MANAGEDNS="+strconv.FormatBool(cluster.manageDNS), "CREDREF="+cluster.credRef, "REGION="+cluster.region, "IMAGESETREF="+cluster.imageSetRef, "INSTALLCONFIGSECRET="+cluster.installConfigSecret, "PULLSECRETREF="+cluster.pullSecretRef, "INSTALLATTEMPTSLIMIT="+strconv.Itoa(cluster.installAttemptsLimit))
o.Expect(err).NotTo(o.HaveOccurred())
}
func (machine *machinepool) create(oc *exutil.CLI) {
// Set default values
if machine.gcpSecureBoot == "" {
machine.gcpSecureBoot = "Disabled"
}
if machine.customizedTag == "" {
machine.customizedTag = AWSDefaultMPTag
}
parameters := []string{"--ignore-unknown-parameters=true", "-f", machine.template, "-p", "CLUSTERNAME=" + machine.clusterName, "NAMESPACE=" + machine.namespace, "IOPS=" + strconv.Itoa(machine.iops), "AUTHENTICATION=" + machine.authentication, "SECUREBOOT=" + machine.gcpSecureBoot, "CUSTOMIZEDTAG=" + machine.customizedTag}
if len(machine.networkProjectID) > 0 {
parameters = append(parameters, "NETWORKPROJECTID="+machine.networkProjectID)
}
err := applyResourceFromTemplate(oc, parameters...)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (syncresource *syncSetResource) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", syncresource.template, "-p", "NAME="+syncresource.name, "NAMESPACE="+syncresource.namespace, "CDREFNAME="+syncresource.cdrefname, "NAMESPACE2="+syncresource.namespace2, "RAMODE="+syncresource.ramode, "APPLYBEHAVIOR="+syncresource.applybehavior, "CMNAME="+syncresource.cmname, "CMNAMESPACE="+syncresource.cmnamespace)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (syncpatch *syncSetPatch) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", syncpatch.template, "-p", "NAME="+syncpatch.name, "NAMESPACE="+syncpatch.namespace, "CDREFNAME="+syncpatch.cdrefname, "CMNAME="+syncpatch.cmname, "CMNAMESPACE="+syncpatch.cmnamespace, "PCONTENT="+syncpatch.pcontent, "PATCHTYPE="+syncpatch.patchType)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (syncsecret *syncSetSecret) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", syncsecret.template, "-p", "NAME="+syncsecret.name, "NAMESPACE="+syncsecret.namespace, "CDREFNAME="+syncsecret.cdrefname, "SNAME="+syncsecret.sname, "SNAMESPACE="+syncsecret.snamespace, "TNAME="+syncsecret.tname, "TNAMESPACE="+syncsecret.tnamespace)
o.Expect(err).NotTo(o.HaveOccurred())
}
// Azure
func (config *azureInstallConfig) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", config.template, "-p", "NAME1="+config.name1, "NAMESPACE="+config.namespace, "BASEDOMAIN="+config.baseDomain, "NAME2="+config.name2, "RESGROUP="+config.resGroup, "AZURETYPE="+config.azureType, "REGION="+config.region)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (cluster *azureClusterDeployment) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", cluster.template, "-p", "FAKE="+cluster.fake, "COPYCLIDOMAIN="+cluster.copyCliDomain, "NAME="+cluster.name, "NAMESPACE="+cluster.namespace, "BASEDOMAIN="+cluster.baseDomain, "CLUSTERNAME="+cluster.clusterName, "PLATFORMTYPE="+cluster.platformType, "CREDREF="+cluster.credRef, "REGION="+cluster.region, "RESGROUP="+cluster.resGroup, "AZURETYPE="+cluster.azureType, "IMAGESETREF="+cluster.imageSetRef, "INSTALLCONFIGSECRET="+cluster.installConfigSecret, "INSTALLERIMAGEOVERRIDE="+cluster.installerImageOverride, "PULLSECRETREF="+cluster.pullSecretRef)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (pool *azureClusterPool) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", pool.template, "-p", "NAME="+pool.name, "NAMESPACE="+pool.namespace, "FAKE="+pool.fake, "BASEDOMAIN="+pool.baseDomain, "IMAGESETREF="+pool.imageSetRef, "PLATFORMTYPE="+pool.platformType, "CREDREF="+pool.credRef, "REGION="+pool.region, "RESGROUP="+pool.resGroup, "PULLSECRETREF="+pool.pullSecretRef, "SIZE="+strconv.Itoa(pool.size), "MAXSIZE="+strconv.Itoa(pool.maxSize), "RUNNINGCOUNT="+strconv.Itoa(pool.runningCount), "MAXCONCURRENT="+strconv.Itoa(pool.maxConcurrent), "HIBERNATEAFTER="+pool.hibernateAfter)
o.Expect(err).NotTo(o.HaveOccurred())
}
// GCP
func (config *gcpInstallConfig) create(oc *exutil.CLI) {
// Set default values
if config.secureBoot == "" {
config.secureBoot = "Disabled"
}
parameters := []string{"--ignore-unknown-parameters=true", "-f", config.template, "-p", "NAME1=" + config.name1, "NAMESPACE=" + config.namespace, "BASEDOMAIN=" + config.baseDomain, "NAME2=" + config.name2, "REGION=" + config.region, "PROJECTID=" + config.projectid, "SECUREBOOT=" + config.secureBoot}
if len(config.computeSubnet) > 0 {
parameters = append(parameters, "COMPUTESUBNET="+config.computeSubnet)
}
if len(config.controlPlaneSubnet) > 0 {
parameters = append(parameters, "CONTROLPLANESUBNET="+config.controlPlaneSubnet)
}
if len(config.network) > 0 {
parameters = append(parameters, "NETWORK="+config.network)
}
if len(config.networkProjectId) > 0 {
parameters = append(parameters, "NETWORKPROJECTID="+config.networkProjectId)
}
err := applyResourceFromTemplate(oc, parameters...)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (cluster *gcpClusterDeployment) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", cluster.template, "-p", "FAKE="+cluster.fake, "NAME="+cluster.name, "NAMESPACE="+cluster.namespace, "BASEDOMAIN="+cluster.baseDomain, "CLUSTERNAME="+cluster.clusterName, "PLATFORMTYPE="+cluster.platformType, "CREDREF="+cluster.credRef, "REGION="+cluster.region, "IMAGESETREF="+cluster.imageSetRef, "INSTALLCONFIGSECRET="+cluster.installConfigSecret, "PULLSECRETREF="+cluster.pullSecretRef, "INSTALLERIMAGEOVERRIDE="+cluster.installerImageOverride, "INSTALLATTEMPTSLIMIT="+strconv.Itoa(cluster.installAttemptsLimit))
o.Expect(err).NotTo(o.HaveOccurred())
}
func (pool *gcpClusterPool) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", pool.template, "-p", "NAME="+pool.name, "NAMESPACE="+pool.namespace, "FAKE="+pool.fake, "BASEDOMAIN="+pool.baseDomain, "IMAGESETREF="+pool.imageSetRef, "PLATFORMTYPE="+pool.platformType, "CREDREF="+pool.credRef, "REGION="+pool.region, "PULLSECRETREF="+pool.pullSecretRef, "SIZE="+strconv.Itoa(pool.size), "MAXSIZE="+strconv.Itoa(pool.maxSize), "RUNNINGCOUNT="+strconv.Itoa(pool.runningCount), "MAXCONCURRENT="+strconv.Itoa(pool.maxConcurrent), "HIBERNATEAFTER="+pool.hibernateAfter)
o.Expect(err).NotTo(o.HaveOccurred())
}
// vSphere
func (ic *vSphereInstallConfig) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", ic.template,
"-p", "SECRETNAME="+ic.secretName, "SECRETNS="+ic.secretNs, "BASEDOMAIN="+ic.baseDomain,
"ICNAME="+ic.icName, "MACHINENETWORK="+ic.machineNetwork, "APIVIP="+ic.apiVip, "CLUSTER="+ic.cluster,
"DATACENTER="+ic.datacenter, "DATASTORE="+ic.datastore, "INGRESSVIP="+ic.ingressVip, "NETWORK="+ic.network,
"PASSWORD="+ic.password, "USERNAME="+ic.username, "VCENTER="+ic.vCenter)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (cluster *vSphereClusterDeployment) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", cluster.template,
"-p", "FAKE="+strconv.FormatBool(cluster.fake), "NAME="+cluster.name, "NAMESPACE="+cluster.namespace,
"BASEDOMAIN="+cluster.baseDomain, "MANAGEDNS="+strconv.FormatBool(cluster.manageDns),
"CLUSTERNAME="+cluster.clusterName, "CERTREF="+cluster.certRef, "CLUSTER="+cluster.cluster,
"CREDREF="+cluster.credRef, "DATACENTER="+cluster.datacenter, "DATASTORE="+cluster.datastore,
"NETWORK="+cluster.network, "VCENTER="+cluster.vCenter, "IMAGESETREF="+cluster.imageSetRef,
"INSTALLCONFIGSECRET="+cluster.installConfigSecret, "PULLSECRETREF="+cluster.pullSecretRef,
"INSTALLATTEMPTSLIMIT="+strconv.Itoa(cluster.installAttemptsLimit))
o.Expect(err).NotTo(o.HaveOccurred())
}
func getResource(oc *exutil.CLI, asAdmin bool, withoutNamespace bool, parameters ...string) string {
var result string
err := wait.Poll(3*time.Second, 120*time.Second, func() (bool, error) {
output, err := doAction(oc, "get", asAdmin, withoutNamespace, parameters...)
if err != nil {
e2e.Logf("the get error is %v, and try next", err)
return false, nil
}
result = output
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("cat not get %v without empty", parameters))
e2e.Logf("the result of queried resource:%v", result)
return result
}
func doAction(oc *exutil.CLI, action string, asAdmin bool, withoutNamespace bool, parameters ...string) (string, error) {
if asAdmin && withoutNamespace {
return oc.AsAdmin().WithoutNamespace().Run(action).Args(parameters...).Output()
}
if asAdmin && !withoutNamespace {
return oc.AsAdmin().Run(action).Args(parameters...).Output()
}
if !asAdmin && withoutNamespace {
return oc.WithoutNamespace().Run(action).Args(parameters...).Output()
}
if !asAdmin && !withoutNamespace {
return oc.Run(action).Args(parameters...).Output()
}
return "", nil
}
// Check whether the resource meets the expectation
// parameter method: expect or present
// parameter action: get, patch, delete, ...
// parameter executor: asAdmin or not
// parameter inlineNamespace: withoutNamespace or not
// parameter expectAction: compare (exact match) or contain (substring match)
// parameter expectContent: expected string
// parameter expect: ok, expected to have expectContent; nok, not expected to have expectContent
// parameter timeout: use CLUSTER_INSTALL_TIMEOUT as the default, and CLUSTER_INSTALL_TIMEOUT, CLUSTER_RESUME_TIMEOUT etc. in different scenarios
// parameter resource: the resource to check
func newCheck(method string, action string, executor bool, inlineNamespace bool, expectAction bool,
expectContent string, expect bool, timeout int, resource []string) checkDescription {
return checkDescription{
method: method,
action: action,
executor: executor,
inlineNamespace: inlineNamespace,
expectAction: expectAction,
expectContent: expectContent,
expect: expect,
timeout: timeout,
resource: resource,
}
}
type checkDescription struct {
method string
action string
executor bool
inlineNamespace bool
expectAction bool
expectContent string
expect bool
timeout int
resource []string
}
const (
asAdmin = true
withoutNamespace = true
requireNS = false
compare = true
contain = false
present = true
notPresent = false
ok = true
nok = false
)
func (ck checkDescription) check(oc *exutil.CLI) {
switch ck.method {
case "present":
ok := isPresentResource(oc, ck.action, ck.executor, ck.inlineNamespace, ck.expectAction, ck.resource...)
o.Expect(ok).To(o.BeTrue())
case "expect":
err := expectedResource(oc, ck.action, ck.executor, ck.inlineNamespace, ck.expectAction, ck.expectContent, ck.expect, ck.timeout, ck.resource...)
exutil.AssertWaitPollNoErr(err, "can not get expected result")
default:
err := fmt.Errorf("unknown method")
o.Expect(err).NotTo(o.HaveOccurred())
}
}
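// Usage sketch: newCheck builds a checkDescription and check(oc) polls the command until the
// expectation holds or the timeout expires. With compare/ok the whole output must equal the
// expected string; with contain/ok a substring match is enough. The example mirrors the
// clustersync readiness check performed elsewhere in this file.
//
//	newCheck("expect", "get", asAdmin, withoutNamespace, compare, "Running", ok, DefaultTimeout,
//		[]string{"pod", "--selector=control-plane=clustersync", "-n", HiveNamespace,
//			"-o=jsonpath={.items[0].status.phase}"}).check(oc)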
func isPresentResource(oc *exutil.CLI, action string, asAdmin bool, withoutNamespace bool, present bool, parameters ...string) bool {
parameters = append(parameters, "--ignore-not-found")
err := wait.Poll(3*time.Second, 60*time.Second, func() (bool, error) {
output, err := doAction(oc, action, asAdmin, withoutNamespace, parameters...)
if err != nil {
e2e.Logf("the get error is %v, and try next", err)
return false, nil
}
if !present && strings.Compare(output, "") == 0 {
return true, nil
}
if present && strings.Compare(output, "") != 0 {
return true, nil
}
return false, nil
})
return err == nil
}
func expectedResource(oc *exutil.CLI, action string, asAdmin bool, withoutNamespace bool, isCompare bool, content string, expect bool, timeout int, parameters ...string) error {
cc := func(a, b string, ic bool) bool {
bs := strings.Split(b, "+2+")
ret := false
for _, s := range bs {
if (ic && strings.Compare(a, s) == 0) || (!ic && strings.Contains(a, s)) {
ret = true
}
}
return ret
}
var interval, inputTimeout time.Duration
if timeout >= ClusterInstallTimeout {
inputTimeout = time.Duration(timeout/60) * time.Minute
interval = 3 * time.Minute
} else {
inputTimeout = time.Duration(timeout) * time.Second
interval = time.Duration(timeout/60) * time.Second
}
return wait.Poll(interval, inputTimeout, func() (bool, error) {
output, err := doAction(oc, action, asAdmin, withoutNamespace, parameters...)
if err != nil {
e2e.Logf("the get error is %v, and try next", err)
return false, nil
}
e2e.Logf("the queried resource:%s", output)
if isCompare && expect && cc(output, content, isCompare) {
e2e.Logf("the output %s matches one of the content %s, expected", output, content)
return true, nil
}
if isCompare && !expect && !cc(output, content, isCompare) {
e2e.Logf("the output %s does not match the content %s, expected", output, content)
return true, nil
}
if !isCompare && expect && cc(output, content, isCompare) {
e2e.Logf("the output %s contains one of the content %s, expected", output, content)
return true, nil
}
if !isCompare && !expect && !cc(output, content, isCompare) {
e2e.Logf("the output %s does not contain the content %s, expected", output, content)
return true, nil
}
return false, nil
})
}
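// Note on multi-valued expectations: expectedResource splits expectContent on the literal
// separator "+2+", so a check passes as soon as the output matches (compare) or contains
// (contain) any one of the listed values. Hedged sketch, with an illustrative jsonpath:
//
//	// Succeeds once powerState reports either value.
//	newCheck("expect", "get", asAdmin, withoutNamespace, compare, "Hibernating+2+Running", ok,
//		DefaultTimeout, []string{"ClusterDeployment", cdName, "-n", oc.Namespace(),
//			"-o=jsonpath={.spec.powerState}"}).check(oc)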
// clean up the object resource
func cleanupObjects(oc *exutil.CLI, objs ...objectTableRef) {
for _, v := range objs {
e2e.Logf("Start to remove: %v", v)
//Print out debugging info if CD installed is false
var provisionPodOutput, installedFlag string
if v.kind == "ClusterPool" {
if v.namespace != "" {
cdListStr := getCDlistfromPool(oc, v.name)
var cdArray []string
cdArray = strings.Split(strings.TrimSpace(cdListStr), "\n")
for i := range cdArray {
installedFlag, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("ClusterDeployment", "-n", cdArray[i], cdArray[i], "-o=jsonpath={.spec.installed}").Output()
if installedFlag == "false" {
failedCdName := cdArray[i]
e2e.Logf("failedCdName is %s", failedCdName)
//At present, the maximum clusterpool size in the automated tests is 2, so print all of its CDs to gather more information when a CD failed to install
printStatusConditions(oc, "ClusterDeployment", failedCdName, failedCdName)
printProvisionPodLogs(oc, provisionPodOutput, failedCdName)
}
}
}
} else if v.kind == "ClusterDeployment" {
installedFlag, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args(v.kind, "-n", v.namespace, v.name, "-o=jsonpath={.spec.installed}").Output()
if installedFlag == "false" {
printStatusConditions(oc, v.kind, v.namespace, v.name)
printProvisionPodLogs(oc, provisionPodOutput, v.namespace)
}
}
if v.namespace != "" {
oc.AsAdmin().WithoutNamespace().Run("delete").Args(v.kind, "-n", v.namespace, v.name, "--ignore-not-found").Output()
} else {
oc.AsAdmin().WithoutNamespace().Run("delete").Args(v.kind, v.name, "--ignore-not-found").Output()
}
//For ClusterPool or ClusterDeployment, wait for the ClusterDeployment deletion to finish
if v.kind == "ClusterPool" || v.kind == "ClusterDeployment" {
e2e.Logf("Wait ClusterDeployment delete done for %s", v.name)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, v.name, nok, ClusterUninstallTimeout, []string{"ClusterDeployment", "-A"}).check(oc)
}
}
}
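// Usage sketch (resource names are illustrative): cleanupObjects is normally deferred right
// after the resources are created, so ClusterPools/ClusterDeployments are removed, and their
// deletion awaited, even when the test fails part way through. The objectTableRef fields are
// positional: kind, namespace (empty for cluster-scoped resources), name.
//
//	defer cleanupObjects(oc,
//		objectTableRef{"ClusterPool", oc.Namespace(), poolName},
//		objectTableRef{"ClusterImageSet", "", imageSetName},
//	)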
// print out the status conditions
func printStatusConditions(oc *exutil.CLI, kind, namespace, name string) {
statusConditions, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(kind, "-n", namespace, name, "-o=jsonpath={.status.conditions}").Output()
if len(statusConditions) <= LogsLimitLen {
e2e.Logf("statusConditions is %s", statusConditions)
} else {
e2e.Logf("statusConditions is %s", statusConditions[:LogsLimitLen])
}
}
// print out provision pod logs
func printProvisionPodLogs(oc *exutil.CLI, provisionPodOutput, namespace string) {
provisionPodOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-l", "hive.openshift.io/job-type=provision", "-n", namespace, "-o=jsonpath={.items[*].metadata.name}").Output()
e2e.Logf("provisionPodOutput is %s", provisionPodOutput)
//if err == nil, print out provision pod logs
if err == nil && len(strings.TrimSpace(provisionPodOutput)) > 0 {
var provisionPod []string
provisionPod = strings.Split(strings.TrimSpace(provisionPodOutput), " ")
e2e.Logf("provisionPod is %s", provisionPod)
if len(provisionPod) > 0 {
e2e.Logf("provisionPod len is %d. provisionPod[0] is %s", len(provisionPod), provisionPod[0])
provisionPodLogsFile := "logs_output_" + getRandomString()[:ClusterSuffixLen] + ".txt"
provisionPodLogs, _ := oc.AsAdmin().WithoutNamespace().Run("logs").Args(provisionPod[0], "-c", "hive", "-n", namespace).OutputToFile(provisionPodLogsFile)
defer os.Remove(provisionPodLogs)
failLogs, _ := exec.Command("bash", "-c", "grep -E 'level=error|level=fatal' "+provisionPodLogs).Output()
if len(failLogs) <= LogsLimitLen {
e2e.Logf("provisionPodLogs is %s", failLogs)
} else {
e2e.Logf("provisionPodLogs is %s", failLogs[len(failLogs)-LogsLimitLen:])
}
}
}
}
// check if the target string is in a string slice
func ContainsInStringSlice(items []string, item string) bool {
for _, eachItem := range items {
if eachItem == item {
return true
}
}
return false
}
func getInfraIDFromCDName(oc *exutil.CLI, cdName string) string {
var (
infraID string
err error
)
getInfraIDFromCD := func() bool {
infraID, _, err = oc.AsAdmin().Run("get").Args("cd", cdName, "-o=jsonpath={.spec.clusterMetadata.infraID}").Outputs()
return err == nil && strings.HasPrefix(infraID, cdName)
}
o.Eventually(getInfraIDFromCD).WithTimeout(10 * time.Minute).WithPolling(5 * time.Second).Should(o.BeTrue())
e2e.Logf("Found infraID = %v", infraID)
return infraID
}
func getClusterprovisionName(oc *exutil.CLI, cdName, namespace string) string {
var ClusterprovisionName string
var err error
waitForClusterprovision := func() bool {
ClusterprovisionName, _, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("ClusterDeployment", cdName, "-n", namespace, "-o=jsonpath={.status.provisionRef.name}").Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(ClusterprovisionName, cdName) {
return true
} else {
return false
}
}
o.Eventually(waitForClusterprovision).WithTimeout(DefaultTimeout * time.Second).WithPolling(3 * time.Second).Should(o.BeTrue())
return ClusterprovisionName
}
func getProvisionPodNames(oc *exutil.CLI, cdName, namespace string) (provisionPodNames []string) {
// For "kubectl get", the default sorting order is alphabetical
stdout, _, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-l", "hive.openshift.io/job-type=provision", "-l", "hive.openshift.io/cluster-deployment-name="+cdName, "-n", namespace, "-o=jsonpath={.items[*].metadata.name}").Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
for _, provisionPodName := range strings.Split(stdout, " ") {
o.Expect(provisionPodName).To(o.ContainSubstring("provision"))
o.Expect(provisionPodName).To(o.ContainSubstring(cdName))
provisionPodNames = append(provisionPodNames, provisionPodName)
}
return
}
func getDeprovisionPodName(oc *exutil.CLI, cdName, namespace string) string {
var DeprovisionPodName string
var err error
waitForDeprovisionPod := func() bool {
DeprovisionPodName, _, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-l", "hive.openshift.io/job-type=deprovision", "-l", "hive.openshift.io/cluster-deployment-name="+cdName, "-n", namespace, "-o=jsonpath={.items[0].metadata.name}").Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Contains(DeprovisionPodName, cdName) && strings.Contains(DeprovisionPodName, "uninstall") {
return true
} else {
return false
}
}
o.Eventually(waitForDeprovisionPod).WithTimeout(DefaultTimeout * time.Second).WithPolling(3 * time.Second).Should(o.BeTrue())
return DeprovisionPodName
}
/*
Looks for targetLines in the transformed provision log stream with a timeout.
Default lineTransformation is the identity function.
Suitable for test cases for which logs can be checked before the provision is finished.
Example:
Provision logs (the data underlying logStream) = "foo\nbar\nbaz\nquux";
targetLines = []string{"ar", "baz", "qu"};
lineTransformation = nil;
targetLines found in provision logs -> returns true
*/
func assertLogs(logStream *os.File, targetLines []string, lineTransformation func(line string) string, timeout time.Duration) bool {
// Set timeout (applies to future AND currently-blocked Read calls)
endTime := time.Now().Add(timeout)
err := logStream.SetReadDeadline(endTime)
o.Expect(err).NotTo(o.HaveOccurred())
// Default line transformation: the identity function
if lineTransformation == nil {
e2e.Logf("Using default line transformation (the identity function)")
lineTransformation = func(line string) string { return line }
}
// Line scanning
scanner := bufio.NewScanner(logStream)
targetIdx := 0
// In case of timeout, current & subsequent Read calls error out, resulting in scanner.Scan() returning false immediately
for scanner.Scan() {
switch transformedLine, targetLine := lineTransformation(scanner.Text()), targetLines[targetIdx]; {
// We have a match, proceed to the next target line
case targetIdx == 0 && strings.HasSuffix(transformedLine, targetLine) ||
targetIdx == len(targetLines)-1 && strings.HasPrefix(transformedLine, targetLine) ||
transformedLine == targetLine:
if targetIdx++; targetIdx == len(targetLines) {
e2e.Logf("Found substring [%v] in the logs", strings.Join(targetLines, "\n"))
return true
}
// Restart from target line 0
default:
targetIdx = 0
}
}
return false
}
func removeResource(oc *exutil.CLI, parameters ...string) {
output, err := oc.AsAdmin().WithoutNamespace().Run("delete").Args(parameters...).Output()
if err != nil && (strings.Contains(output, "NotFound") || strings.Contains(output, "No resources found")) {
e2e.Logf("No resource found!")
return
}
o.Expect(err).NotTo(o.HaveOccurred())
}
func (hc *hiveconfig) delete(oc *exutil.CLI) {
removeResource(oc, "hiveconfig", "hive")
}
// Create pull-secret in current project namespace
func createPullSecret(oc *exutil.CLI, namespace string) {
dirname := "/tmp/" + oc.Namespace() + "-pull"
err := os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
defer os.RemoveAll(dirname)
err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/pull-secret", "-n", "openshift-config", "--to="+dirname, "--confirm").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.Run("create").Args("secret", "generic", "pull-secret", "--from-file="+dirname+"/.dockerconfigjson", "-n", namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
// Create AWS credentials in current project namespace
func createAWSCreds(oc *exutil.CLI, namespace string) {
dirname := "/tmp/" + oc.Namespace() + "-creds"
err := os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
defer os.RemoveAll(dirname)
err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/aws-creds", "-n", "kube-system", "--to="+dirname, "--confirm").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.Run("create").Args("secret", "generic", "aws-creds", "--from-file="+dirname+"/aws_access_key_id", "--from-file="+dirname+"/aws_secret_access_key", "-n", namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
// Create Route53 AWS credentials in hive namespace
func createRoute53AWSCreds(oc *exutil.CLI, namespace string) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", "route53-aws-creds", "-n", HiveNamespace).Output()
if strings.Contains(output, "NotFound") || err != nil {
e2e.Logf("No route53-aws-creds, Create it.")
dirname := "/tmp/" + oc.Namespace() + "-route53-creds"
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
defer os.RemoveAll(dirname)
err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/aws-creds", "-n", "kube-system", "--to="+dirname, "--confirm").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", "route53-aws-creds", "--from-file="+dirname+"/aws_access_key_id", "--from-file="+dirname+"/aws_secret_access_key", "-n", HiveNamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
} else {
e2e.Logf("route53-aws-creds already exists.")
}
}
// Create Azure credentials in current project namespace
func createAzureCreds(oc *exutil.CLI, namespace string) {
dirname := "/tmp/" + oc.Namespace() + "-creds"
err := os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
defer os.RemoveAll(dirname)
var azureClientID, azureClientSecret, azureSubscriptionID, azureTenantID string
azureClientID, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/azure-credentials", "-n", "kube-system", "--template='{{.data.azure_client_id | base64decode}}'").Output()
o.Expect(err).NotTo(o.HaveOccurred())
azureClientSecret, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/azure-credentials", "-n", "kube-system", "--template='{{.data.azure_client_secret | base64decode}}'").Output()
o.Expect(err).NotTo(o.HaveOccurred())
azureSubscriptionID, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/azure-credentials", "-n", "kube-system", "--template='{{.data.azure_subscription_id | base64decode}}'").Output()
o.Expect(err).NotTo(o.HaveOccurred())
azureTenantID, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/azure-credentials", "-n", "kube-system", "--template='{{.data.azure_tenant_id | base64decode}}'").Output()
o.Expect(err).NotTo(o.HaveOccurred())
//Convert credentials to osServicePrincipal.json format
output := fmt.Sprintf("{\"subscriptionId\":\"%s\",\"clientId\":\"%s\",\"clientSecret\":\"%s\",\"tenantId\":\"%s\"}", azureSubscriptionID[1:len(azureSubscriptionID)-1], azureClientID[1:len(azureClientID)-1], azureClientSecret[1:len(azureClientSecret)-1], azureTenantID[1:len(azureTenantID)-1])
outputFile, outputErr := os.OpenFile(dirname+"/osServicePrincipal.json", os.O_CREATE|os.O_WRONLY, 0666)
o.Expect(outputErr).NotTo(o.HaveOccurred())
defer outputFile.Close()
outputWriter := bufio.NewWriter(outputFile)
writeByte, writeError := outputWriter.WriteString(output)
o.Expect(writeError).NotTo(o.HaveOccurred())
writeError = outputWriter.Flush()
o.Expect(writeError).NotTo(o.HaveOccurred())
e2e.Logf("%d byte written to osServicePrincipal.json", writeByte)
err = oc.Run("create").Args("secret", "generic", AzureCreds, "--from-file="+dirname+"/osServicePrincipal.json", "-n", namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
// Create GCP credentials in current project namespace
func createGCPCreds(oc *exutil.CLI, namespace string) {
dirname := "/tmp/" + oc.Namespace() + "-creds"
err := os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
defer os.RemoveAll(dirname)
err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/gcp-credentials", "-n", "kube-system", "--to="+dirname, "--confirm").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.Run("create").Args("secret", "generic", GCPCreds, "--from-file=osServiceAccount.json="+dirname+"/service_account.json", "-n", namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
func createVSphereCreds(oc *exutil.CLI, namespace, vCenter string) {
username, password := getVSphereCredentials(oc, vCenter)
secret := &corev1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: corev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: VSphereCreds,
Namespace: namespace,
},
Type: corev1.SecretTypeOpaque,
StringData: map[string]string{
"username": username,
"password": password,
},
}
_, err := oc.AdminKubeClient().CoreV1().Secrets(namespace).Create(context.Background(), secret, metav1.CreateOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
}
// Return release version from Image
func extractRelFromImg(image string) string {
index := strings.Index(image, ":")
if index != -1 {
tempStr := image[index+1:]
index = strings.Index(tempStr, "-")
if index != -1 {
e2e.Logf("Extracted OCP release: %s", tempStr[:index])
return tempStr[:index]
}
}
e2e.Logf("Failed to extract OCP release from Image.")
return ""
}
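// Example (the release image below is illustrative): for
// "quay.io/openshift-release-dev/ocp-release:4.14.0-ec.2-x86_64" the function returns "4.14.0",
// i.e. everything between the tag's leading ':' and the first '-' after it.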
// Get CD list from Pool
// Return string CD list such as "pool-44945-2bbln5m47s\n pool-44945-f8xlv6m6s"
func getCDlistfromPool(oc *exutil.CLI, pool string) string {
fileName := "cd_output_" + getRandomString() + ".txt"
cdOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cd", "-A").OutputToFile(fileName)
o.Expect(err).NotTo(o.HaveOccurred())
defer os.Remove(cdOutput)
poolCdList, err := exec.Command("bash", "-c", "cat "+cdOutput+" | grep "+pool+" | awk '{print $1}'").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("CD list is %s for pool %s", poolCdList, pool)
return string(poolCdList)
}
// Extract the kubeconfig for CD/clustername, return its path
func getClusterKubeconfig(oc *exutil.CLI, clustername, namespace, dir string) string {
kubeconfigsecretname, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cd", clustername, "-n", namespace, "-o=jsonpath={.spec.clusterMetadata.adminKubeconfigSecretRef.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Extract cluster %s kubeconfig to %s", clustername, dir)
err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/"+kubeconfigsecretname, "-n", namespace, "--to="+dir, "--confirm").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
kubeConfigPath := dir + "/kubeconfig"
return kubeConfigPath
}
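// Usage sketch (the temporary directory is illustrative): the returned path can be passed to
// subsequent oc invocations via --kubeconfig to run commands against the spoke cluster.
//
//	tmpDir := "/tmp/" + oc.Namespace() + "-kubeconfig"
//	o.Expect(os.MkdirAll(tmpDir, 0777)).NotTo(o.HaveOccurred())
//	defer os.RemoveAll(tmpDir)
//	kubeconfig := getClusterKubeconfig(oc, cdName, oc.Namespace(), tmpDir)
//	nodes, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "--kubeconfig="+kubeconfig).Output()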
// Check resource number after filtering
func checkResourceNumber(oc *exutil.CLI, filterName string, resource []string) int {
resourceOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(resource...).Output()
o.Expect(err).NotTo(o.HaveOccurred())
return strings.Count(resourceOutput, filterName)
}
func getPullSecret(oc *exutil.CLI) (string, error) {
return oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/pull-secret", "-n", "openshift-config", `--template={{index .data ".dockerconfigjson" | base64decode}}`).OutputToFile("auth.dockerconfigjson")
}
func getCommitID(oc *exutil.CLI, component string, clusterVersion string) (string, error) {
secretFile, secretErr := getPullSecret(oc)
defer os.Remove(secretFile)
if secretErr != nil {
return "", secretErr
}
outFilePath, ocErr := oc.AsAdmin().WithoutNamespace().Run("adm").Args("release", "info", "--registry-config="+secretFile, "--commits", clusterVersion, "--insecure=true").OutputToFile("commitIdLogs.txt")
defer os.Remove(outFilePath)
if ocErr != nil {
return "", ocErr
}
commitID, cmdErr := exec.Command("bash", "-c", "cat "+outFilePath+" | grep "+component+" | awk '{print $3}'").Output()
return strings.TrimSuffix(string(commitID), "\n"), cmdErr
}
func getPullSpec(oc *exutil.CLI, component string, clusterVersion string) (string, error) {
secretFile, secretErr := getPullSecret(oc)
defer os.Remove(secretFile)
if secretErr != nil {
return "", secretErr
}
pullSpec, ocErr := oc.AsAdmin().WithoutNamespace().Run("adm").Args("release", "info", "--registry-config="+secretFile, "--image-for="+component, clusterVersion, "--insecure=true").Output()
if ocErr != nil {
return "", ocErr
}
return pullSpec, nil
}
const (
enable = true
disable = false
)
// Expose Hive metrics via user workload monitoring (user-defined projects)
// The cluster's monitoring configuration before running this function is stored so that it can be recovered later.
// *needRecoverPtr: whether recovering is needed
// *prevConfigPtr: data stored in ConfigMap/cluster-monitoring-config before running this function
func exposeMetrics(oc *exutil.CLI, testDataDir string, needRecoverPtr *bool, prevConfigPtr *string) {
// Look for cluster-level monitoring configuration
getOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ConfigMap", "cluster-monitoring-config", "-n", "openshift-monitoring", "--ignore-not-found").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// Enable user workload monitoring
if len(getOutput) > 0 {
e2e.Logf("ConfigMap cluster-monitoring-config exists, extracting cluster-monitoring-config ...")
extractOutput, _, _ := oc.AsAdmin().WithoutNamespace().Run("extract").Args("ConfigMap/cluster-monitoring-config", "-n", "openshift-monitoring", "--to=-").Outputs()
if strings.Contains(strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(extractOutput, "'", ""), "\"", ""), " ", ""), "enableUserWorkload:true") {
e2e.Logf("User workload is enabled, doing nothing ... ")
*needRecoverPtr, *prevConfigPtr = false, ""
} else {
e2e.Logf("User workload is not enabled, enabling ...")
*needRecoverPtr, *prevConfigPtr = true, extractOutput
extractOutputParts := strings.Split(extractOutput, "\n")
containKeyword := false
for idx, part := range extractOutputParts {
if strings.Contains(part, "enableUserWorkload") {
e2e.Logf("Keyword \"enableUserWorkload\" found in cluster-monitoring-config, setting enableUserWorkload to true ...")
extractOutputParts[idx] = "enableUserWorkload: true"
containKeyword = true
break
}
}
if !containKeyword {
e2e.Logf("Keyword \"enableUserWorkload\" not found in cluster-monitoring-config, adding ...")
extractOutputParts = append(extractOutputParts, "enableUserWorkload: true")
}
modifiedExtractOutput := strings.ReplaceAll(strings.Join(extractOutputParts, "\\n"), "\"", "\\\"")
e2e.Logf("Patching ConfigMap cluster-monitoring-config to enable user workload monitoring ...")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("ConfigMap", "cluster-monitoring-config", "-n", "openshift-monitoring", "--type", "merge", "-p", fmt.Sprintf("{\"data\":{\"config.yaml\": \"%s\"}}", modifiedExtractOutput)).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
} else {
e2e.Logf("ConfigMap cluster-monitoring-config does not exist, creating ...")
*needRecoverPtr, *prevConfigPtr = true, ""
clusterMonitoringConfigTemp := clusterMonitoringConfig{
enableUserWorkload: true,
namespace: "openshift-monitoring",
template: filepath.Join(testDataDir, "cluster-monitoring-config.yaml"),
}
clusterMonitoringConfigTemp.create(oc)
}
// Check monitoring-related pods are created in the openshift-user-workload-monitoring namespace
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "prometheus-operator", ok, DefaultTimeout, []string{"pod", "-n", "openshift-user-workload-monitoring"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "prometheus-user-workload", ok, DefaultTimeout, []string{"pod", "-n", "openshift-user-workload-monitoring"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "thanos-ruler-user-workload", ok, DefaultTimeout, []string{"pod", "-n", "openshift-user-workload-monitoring"}).check(oc)
// Check if ServiceMonitors and PodMonitors are created
e2e.Logf("Checking if ServiceMonitors and PodMonitors exist ...")
getOutput, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("ServiceMonitor", "hive-clustersync", "-n", HiveNamespace, "--ignore-not-found").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if len(getOutput) == 0 {
e2e.Logf("Creating PodMonitor for hive-operator ...")
podMonitorYaml := filepath.Join(testDataDir, "hive-operator-podmonitor.yaml")
err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", podMonitorYaml).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Creating ServiceMonitor for hive-controllers ...")
serviceMonitorControllers := filepath.Join(testDataDir, "hive-controllers-servicemonitor.yaml")
err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", serviceMonitorControllers).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Creating ServiceMonitor for hive-clustersync ...")
serviceMonitorClustersync := filepath.Join(testDataDir, "hive-clustersync-servicemonitor.yaml")
err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", serviceMonitorClustersync).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
}
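// Usage sketch: the two out-parameters record whether and how cluster monitoring has to be
// restored, so recoverClusterMonitoring is deferred with the same pointers before calling
// exposeMetrics.
//
//	needRecover, prevConfig := false, ""
//	defer recoverClusterMonitoring(oc, &needRecover, &prevConfig)
//	exposeMetrics(oc, testDataDir, &needRecover, &prevConfig)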
// Recover cluster monitoring state, neutralizing the effect of exposeMetrics.
func recoverClusterMonitoring(oc *exutil.CLI, needRecoverPtr *bool, prevConfigPtr *string) {
if *needRecoverPtr {
e2e.Logf("Recovering cluster monitoring configurations ...")
if len(*prevConfigPtr) == 0 {
e2e.Logf("ConfigMap/cluster-monitoring-config did not exist before calling exposeMetrics, deleting ...")
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("ConfigMap", "cluster-monitoring-config", "-n", "openshift-monitoring", "--ignore-not-found").Execute()
if err != nil {
e2e.Logf("Error occurred when deleting ConfigMap/cluster-monitoring-config: %v", err)
}
} else {
e2e.Logf("Reverting changes made to ConfigMap/cluster-monitoring-config ...")
*prevConfigPtr = strings.ReplaceAll(strings.ReplaceAll(*prevConfigPtr, "\n", "\\n"), "\"", "\\\"")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("ConfigMap", "cluster-monitoring-config", "-n", "openshift-monitoring", "--type", "merge", "-p", fmt.Sprintf("{\"data\":{\"config.yaml\": \"%s\"}}", *prevConfigPtr)).Execute()
if err != nil {
e2e.Logf("Error occurred when patching ConfigMap/cluster-monitoring-config: %v", err)
}
}
e2e.Logf("Deleting ServiceMonitors and PodMonitors in the hive namespace ...")
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("ServiceMonitor", "hive-clustersync", "-n", HiveNamespace, "--ignore-not-found").Execute()
if err != nil {
e2e.Logf("Error occurred when deleting ServiceMonitor/hive-clustersync: %v", err)
}
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("ServiceMonitor", "hive-controllers", "-n", HiveNamespace, "--ignore-not-found").Execute()
if err != nil {
e2e.Logf("Error occurred when deleting ServiceMonitor/hive-controllers: %v", err)
}
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("PodMonitor", "hive-operator", "-n", HiveNamespace, "--ignore-not-found").Execute()
if err != nil {
e2e.Logf("Error occurred when deleting PodMonitor/hive-operator: %v", err)
}
return
}
e2e.Logf("No recovering needed for cluster monitoring configurations. ")
}
// Enable or disable hive exportMetrics in HiveConfig
func exportMetric(oc *exutil.CLI, action bool) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("HiveConfig", "hive", "-o=jsonpath={.spec.exportMetrics}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if action {
if strings.Contains(output, "true") {
e2e.Logf("The exportMetrics has been enabled in hiveconfig, won't change")
} else {
e2e.Logf("Enable hive exportMetric in Hiveconfig.")
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"HiveConfig", "hive", "--type", "merge", "-p", `{"spec":{"exportMetrics": true}}`}).check(oc)
hiveNS, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("Hiveconfig", "hive", "-o=jsonpath={.spec.targetNamespace}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(hiveNS).NotTo(o.BeEmpty())
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "prometheus-k8s", ok, DefaultTimeout, []string{"role", "-n", hiveNS}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "prometheus-k8s", ok, DefaultTimeout, []string{"rolebinding", "-n", hiveNS}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "hive-clustersync", ok, DefaultTimeout, []string{"servicemonitor", "-n", hiveNS, "-o=name"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "hive-controllers", ok, DefaultTimeout, []string{"servicemonitor", "-n", hiveNS, "-o=name"}).check(oc)
}
}
if !action {
if !strings.Contains(output, "true") {
e2e.Logf("The exportMetrics has been disabled in hiveconfig, won't change")
} else {
e2e.Logf("Disable hive exportMetric in Hiveconfig.")
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"HiveConfig", "hive", "--type", "merge", "-p", `{"spec":{"exportMetrics": false}}`}).check(oc)
hiveNS, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("Hiveconfig", "hive", "-o=jsonpath={.spec.targetNamespace}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(hiveNS).NotTo(o.BeEmpty())
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "prometheus-k8s", nok, DefaultTimeout, []string{"role", "-n", hiveNS}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "prometheus-k8s", nok, DefaultTimeout, []string{"rolebinding", "-n", hiveNS}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "hive-clustersync", nok, DefaultTimeout, []string{"servicemonitor", "-n", hiveNS, "-o=name"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "hive-controllers", nok, DefaultTimeout, []string{"servicemonitor", "-n", hiveNS, "-o=name"}).check(oc)
}
}
}
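// Usage sketch: the enable/disable constants defined earlier in this file keep call sites
// self-describing; a deferred call restores the original setting when a test toggles
// exportMetrics (illustrative, assuming exportMetrics starts out disabled).
//
//	defer exportMetric(oc, disable)
//	exportMetric(oc, enable)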
func doPrometheusQuery(oc *exutil.CLI, token string, url string, query string) prometheusQueryResult {
var data prometheusQueryResult
msg, _, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args(
"-n", "openshift-monitoring", "-c", "prometheus", "prometheus-k8s-0", "-i", "--",
"curl", "-k", "-H", fmt.Sprintf("Authorization: Bearer %v", token),
fmt.Sprintf("%s%s", url, query)).Outputs()
if err != nil {
e2e.Failf("Failed Prometheus query, error: %v", err)
}
o.Expect(msg).NotTo(o.BeEmpty())
err = json.Unmarshal([]byte(msg), &data)
o.Expect(err).NotTo(o.HaveOccurred())
return data
}
// parameter expect: ok, the metric is expected to exist; nok, the metric is expected to be absent
func checkMetricExist(oc *exutil.CLI, expect bool, token string, url string, query []string) {
for _, v := range query {
e2e.Logf("Check metric %s", v)
err := wait.Poll(1*time.Minute, (ClusterResumeTimeout/60)*time.Minute, func() (bool, error) {
data := doPrometheusQuery(oc, token, url, v)
if expect && len(data.Data.Result) > 0 {
e2e.Logf("Metric %s exist, expected", v)
return true, nil
}
if !expect && len(data.Data.Result) == 0 {
e2e.Logf("Metric %s doesn't exist, expected", v)
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "\"checkMetricExist\" failed: cannot get the expected result")
}
}
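// Usage sketch (the token, query URL, and metric names are assumptions, not values defined in
// this file): checkMetricExist keeps polling until every queried metric returns at least one
// result (expect == ok) or no result at all (expect == nok).
//
//	queries := []string{"hive_cluster_deployments", "hive_cluster_deployments_installed"}
//	checkMetricExist(oc, ok, token, thanosQuerierURL, queries)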
func checkResourcesMetricValue(oc *exutil.CLI, resourceName, resourceNamespace string, expectedResult string, token string, url string, query string) {
err := wait.Poll(1*time.Minute, (ClusterResumeTimeout/60)*time.Minute, func() (bool, error) {
data := doPrometheusQuery(oc, token, url, query)
for _, v := range data.Data.Result {
switch query {
case "hive_clusterclaim_assignment_delay_seconds_count", "hive_clusterpool_stale_clusterdeployments_deleted":
if v.Metric.ClusterpoolName == resourceName && v.Metric.ClusterpoolNamespace == resourceNamespace {
e2e.Logf("Found metric for pool %s in namespace %s", resourceName, resourceNamespace)
if v.Value[1].(string) == expectedResult {
e2e.Logf("The metric Value %s matches expected %s", v.Value[1].(string), expectedResult)
return true, nil
}
e2e.Logf("The metric Value %s didn't match expected %s, try next round", v.Value[1].(string), expectedResult)
return false, nil
}
case "hive_cluster_deployment_provision_underway_install_restarts":
if v.Metric.ClusterDeployment == resourceName && v.Metric.ExportedNamespace == resourceNamespace {
e2e.Logf("Found metric for ClusterDeployment %s in namespace %s", resourceName, resourceNamespace)
if v.Value[1].(string) == expectedResult {
e2e.Logf("The metric Value %s matches expected %s", v.Value[1].(string), expectedResult)
return true, nil
}
e2e.Logf("The metric Value %s didn't match expected %s, try next round", v.Value[1].(string), expectedResult)
return false, nil
}
case "hive_cluster_deployment_install_success_total_count":
if v.Metric.Region == resourceName && v.Metric.Namespace == resourceNamespace {
if data.Data.Result[0].Metric.InstallAttempt == expectedResult {
e2e.Logf("The region %s has %s install attempts", v.Metric.Region, data.Data.Result[0].Metric.InstallAttempt)
return true, nil
}
e2e.Logf("The metric InstallAttempt label %s didn't match expected %s, try next round", data.Data.Result[0].Metric.InstallAttempt, expectedResult)
return false, nil
}
case "hive_cluster_deployment_install_failure_total_count":
if v.Metric.Region == resourceName && v.Metric.Namespace == resourceNamespace {
if data.Data.Result[2].Metric.InstallAttempt == expectedResult {
e2e.Logf("The region %s has %s install attempts", v.Metric.Region, data.Data.Result[2].Metric.InstallAttempt)
return true, nil
}
e2e.Logf("The metric InstallAttempt label %s didn't match expected %s, try next round", data.Data.Result[2].Metric.InstallAttempt, expectedResult)
return false, nil
}
}
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "\"checkResourcesMetricValue\" failed: cannot get the expected result")
}
func checkHiveConfigMetric(oc *exutil.CLI, field string, expectedResult string, token string, url string, query string) {
err := wait.Poll(1*time.Minute, (ClusterResumeTimeout/60)*time.Minute, func() (bool, error) {
data := doPrometheusQuery(oc, token, url, query)
switch field {
case "condition":
if data.Data.Result[0].Metric.Condition == expectedResult {
e2e.Logf("the Metric %s field \"%s\" matched the expected result \"%s\"", query, field, expectedResult)
return true, nil
}
case "reason":
if data.Data.Result[0].Metric.Reason == expectedResult {
e2e.Logf("the Metric %s field \"%s\" matched the expected result \"%s\"", query, field, expectedResult)
return true, nil
}
default:
e2e.Logf("the Metric %s doesn't contain field %s", query, field)
return false, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "\"checkHiveConfigMetric\" failed: cannot get the expected result")
}
func createCD(testDataDir string, testOCPImage string, oc *exutil.CLI, ns string, installConfigSecret interface{}, cd interface{}) {
switch x := cd.(type) {
case clusterDeployment:
exutil.By("Create AWS ClusterDeployment..." + ns)
imageSet := clusterImageSet{
name: x.name + "-imageset",
releaseImage: testOCPImage,
template: filepath.Join(testDataDir, "clusterimageset.yaml"),
}
exutil.By("Create ClusterImageSet...")
imageSet.create(oc)
//secrets can only be accessed by pods in the same namespace, so copy the pull-secret and aws-creds to the target namespace
exutil.By("Copy AWS platform credentials...")
createAWSCreds(oc, ns)
exutil.By("Copy pull-secret...")
createPullSecret(oc, ns)
exutil.By("Create AWS Install-Config Secret...")
switch ic := installConfigSecret.(type) {
case installConfig:
ic.create(oc)
default:
e2e.Failf("Incorrect install-config type")
}
x.create(oc)
case gcpClusterDeployment:
exutil.By("Create gcp ClusterDeployment..." + ns)
imageSet := clusterImageSet{
name: x.name + "-imageset",
releaseImage: testOCPImage,
template: filepath.Join(testDataDir, "clusterimageset.yaml"),
}
exutil.By("Create ClusterImageSet...")
imageSet.create(oc)
//secrets can only be accessed by pods in the same namespace, so copy the pull-secret and GCP credentials to the target namespace
exutil.By("Copy GCP platform credentials...")
createGCPCreds(oc, ns)
exutil.By("Copy pull-secret...")
createPullSecret(oc, ns)
exutil.By("Create GCP Install-Config Secret...")
switch ic := installConfigSecret.(type) {
case gcpInstallConfig:
ic.create(oc)
default:
e2e.Failf("Incorrect install-config type")
}
x.create(oc)
case azureClusterDeployment:
exutil.By("Create azure ClusterDeployment..." + ns)
imageSet := clusterImageSet{
name: x.name + "-imageset",
releaseImage: testOCPImage,
template: filepath.Join(testDataDir, "clusterimageset.yaml"),
}
exutil.By("Create ClusterImageSet...")
imageSet.create(oc)
//secrets can only be accessed by pods in the same namespace, so copy the pull-secret and Azure credentials to the target namespace
exutil.By("Copy Azure platform credentials...")
createAzureCreds(oc, ns)
exutil.By("Copy pull-secret...")
createPullSecret(oc, ns)
exutil.By("Create Azure Install-Config Secret...")
switch ic := installConfigSecret.(type) {
case azureInstallConfig:
ic.create(oc)
default:
e2e.Failf("Incorrect install-config type")
}
x.create(oc)
case vSphereClusterDeployment:
exutil.By("Creating vSphere ClusterDeployment in namespace: " + ns)
imageSet := clusterImageSet{
name: x.name + "-imageset",
releaseImage: testOCPImage,
template: filepath.Join(testDataDir, "clusterimageset.yaml"),
}
exutil.By("Creating ClusterImageSet")
imageSet.create(oc)
exutil.By("Copying vSphere platform credentials")
createVSphereCreds(oc, ns, x.vCenter)
exutil.By("Copying pull-secret")
createPullSecret(oc, ns)
exutil.By("Creating vCenter certificates Secret")
createVsphereCertsSecret(oc, ns, x.vCenter)
exutil.By("Creating vSphere Install-Config Secret")
switch ic := installConfigSecret.(type) {
case vSphereInstallConfig:
ic.create(oc)
default:
e2e.Failf("Incorrect install-config type")
}
x.create(oc)
default:
exutil.By("Unknown ClusterDeployment type")
}
}
func cleanCD(oc *exutil.CLI, clusterImageSetName string, ns string, secretName string, cdName string) {
defer cleanupObjects(oc, objectTableRef{"ClusterImageSet", "", clusterImageSetName})
defer cleanupObjects(oc, objectTableRef{"Secret", ns, secretName})
defer cleanupObjects(oc, objectTableRef{"ClusterDeployment", ns, cdName})
}
// Install the Hive operator if it is not already installed
func installHiveOperator(oc *exutil.CLI, ns *hiveNameSpace, og *operatorGroup, sub *subscription, hc *hiveconfig, testDataDir string) (string, error) {
nsTemp := filepath.Join(testDataDir, "namespace.yaml")
ogTemp := filepath.Join(testDataDir, "operatorgroup.yaml")
subTemp := filepath.Join(testDataDir, "subscription.yaml")
hcTemp := filepath.Join(testDataDir, "hiveconfig.yaml")
*ns = hiveNameSpace{
name: HiveNamespace,
template: nsTemp,
}
*og = operatorGroup{
name: "hive-og",
namespace: HiveNamespace,
template: ogTemp,
}
*sub = subscription{
name: "hive-sub",
namespace: HiveNamespace,
channel: "alpha",
approval: "Automatic",
operatorName: "hive-operator",
sourceName: "community-operators",
sourceNamespace: "openshift-marketplace",
startingCSV: "",
currentCSV: "",
installedCSV: "",
template: subTemp,
}
*hc = hiveconfig{
logLevel: "debug",
targetNamespace: HiveNamespace,
template: hcTemp,
}
// Create Hive Resources if not exist
ns.createIfNotExist(oc)
og.createIfNotExist(oc)
sub.createIfNotExist(oc)
hc.createIfNotExist(oc)
return "success", nil
}
// Get hiveadmission pod name
func getHiveadmissionPod(oc *exutil.CLI, namespace string) string {
hiveadmissionOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "--selector=app=hiveadmission", "-n", namespace, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
podArray := strings.Split(strings.TrimSpace(hiveadmissionOutput), " ")
o.Expect(len(podArray)).To(o.BeNumerically(">", 0))
e2e.Logf("Hiveadmission pod list is %s,first pod name is %s", podArray, podArray[0])
return podArray[0]
}
// Get hivecontrollers pod name
func getHivecontrollersPod(oc *exutil.CLI, namespace string) string {
hivecontrollersOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "--selector=control-plane=controller-manager", "-n", namespace, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
podArray := strings.Split(strings.TrimSpace(hivecontrollersOutput), " ")
o.Expect(len(podArray)).To(o.BeNumerically(">", 0))
e2e.Logf("Hivecontrollers pod list is %s,first pod name is %s", podArray, podArray[0])
return podArray[0]
}
func getTestOCPImage() string {
testImageVersion := "4.19"
testOCPImage, err := exutil.GetLatestNightlyImage(testImageVersion)
o.Expect(err).NotTo(o.HaveOccurred())
if testOCPImage == "" {
e2e.Failf("Failed to get image for version %v", testImageVersion)
}
return testOCPImage
}
func getCondition(oc *exutil.CLI, kind, resourceName, namespace, conditionType string) map[string]string {
e2e.Logf("Extracting the %v condition from %v/%v in namespace %v", conditionType, kind, resourceName, namespace)
stdout, _, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(kind, resourceName, "-n", namespace, fmt.Sprintf("-o=jsonpath={.status.conditions[?(@.type==\"%s\")]}", conditionType)).Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
var condition map[string]string
// Avoid Unmarshal failure when stdout is empty
if len(stdout) == 0 {
e2e.Logf("Condition %v not found on %v/%v in namespace %v", conditionType, kind, resourceName, namespace)
return condition
}
err = json.Unmarshal([]byte(stdout), &condition)
o.Expect(err).NotTo(o.HaveOccurred())
return condition
}
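// checkCondition logs the hint, then returns a poll function that succeeds once every expected
// key/value pair matches the condition of the given type.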
func checkCondition(oc *exutil.CLI, kind, resourceName, namespace, conditionType string, expectKeyValue map[string]string, hint string) func() bool {
e2e.Logf(hint)
return func() bool {
condition := getCondition(oc, kind, resourceName, namespace, conditionType)
for key, expectValue := range expectKeyValue {
if actualValue, ok := condition[key]; !ok || actualValue != expectValue {
e2e.Logf("For condition %s's %s, expected value is %s, actual value is %v, retrying ...", conditionType, key, expectValue, actualValue)
return false
}
}
e2e.Logf("For condition %s, all fields checked are expected, proceeding to the next step ...", conditionType)
return true
}
}
// Get AWS credentials from root credentials, mount paths, and then from external configurations (in that order)
func getAWSCredentials(oc *exutil.CLI, mountPaths ...string) (AWSAccessKeyID string, AWSSecretAccessKey string) {
err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/aws-creds", "-n=kube-system").Execute()
switch {
// Try root credentials
case err == nil:
e2e.Logf("Extracting AWS credentials from root credentials")
AWSAccessKeyID, _, err = oc.
AsAdmin().
WithoutNamespace().
Run("extract").
Args("secret/aws-creds", "-n=kube-system", "--keys=aws_access_key_id", "--to=-").
Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
AWSSecretAccessKey, _, err = oc.
AsAdmin().
WithoutNamespace().
Run("extract").
Args("secret/aws-creds", "-n=kube-system", "--keys=aws_secret_access_key", "--to=-").
Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
// Try mount paths
case len(mountPaths) > 0:
e2e.Logf("Extracting AWS creds from credential mounts")
e2e.Logf("Is the test running in the CI environment, targeting a non-AWS platform ?")
re, err := regexp.Compile(AWSCredsPattern)
o.Expect(err).NotTo(o.HaveOccurred())
for _, mountPath := range mountPaths {
e2e.Logf("Extracting AWS creds from path %s", mountPath)
fileBs, err := os.ReadFile(mountPath)
if err != nil {
e2e.Logf("Failed to read file: %v", err)
continue
}
matches := re.FindStringSubmatch(string(fileBs))
if len(matches) != 3 {
e2e.Logf("Incorrect credential format")
continue
}
AWSAccessKeyID = matches[1]
AWSSecretAccessKey = matches[2]
break
}
// Fall back to external configurations
default:
e2e.Logf("Extracting AWS creds from external configurations")
e2e.Logf("Is the test running locally, targeting a non-AWS platform ?")
if cfg, err := config.LoadDefaultConfig(context.Background()); err == nil {
creds, retrieveErr := cfg.Credentials.Retrieve(context.Background())
o.Expect(retrieveErr).NotTo(o.HaveOccurred())
AWSAccessKeyID = creds.AccessKeyID
AWSSecretAccessKey = creds.SecretAccessKey
}
}
o.Expect(AWSAccessKeyID).NotTo(o.BeEmpty())
o.Expect(AWSSecretAccessKey).NotTo(o.BeEmpty())
return
}
// Extract vSphere root credentials
func getVSphereCredentials(oc *exutil.CLI, vCenter string) (username string, password string) {
var err error
username, _, err = oc.
AsAdmin().
WithoutNamespace().
Run("extract").
Args("secret/vsphere-creds", "-n=kube-system", fmt.Sprintf("--keys=%v.username", vCenter), "--to=-").
Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(username).NotTo(o.BeEmpty())
password, _, err = oc.
AsAdmin().
WithoutNamespace().
Run("extract").
Args("secret/vsphere-creds", "-n=kube-system", fmt.Sprintf("--keys=%v.password", vCenter), "--to=-").
Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
// This assertion fails only when the password is an empty string, so the password itself is never logged.
o.Expect(password).NotTo(o.BeEmpty())
return
}
// getAWSConfig gets AWS-SDK-V2 configurations with static credentials for the provided region
func getAWSConfig(oc *exutil.CLI, region string, secretMountPaths ...string) aws.Config {
AWSAccessKeyID, AWSSecretAccessKey := getAWSCredentials(oc, secretMountPaths...)
cfg, err := config.LoadDefaultConfig(
context.Background(),
config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(AWSAccessKeyID, AWSSecretAccessKey, "")),
config.WithRegion(region),
)
o.Expect(err).NotTo(o.HaveOccurred())
return cfg
}
// Custom constructor for a Lego DNS provider that avoids setting environment variables,
// as the default implementation reads its configuration from them
func newLegoDNSProvider(
maxRetries, TTL int,
propagationTimeout, pollingInterval time.Duration,
accessKeyID, secretAccessKey, region string,
) (*legoroute53.DNSProvider, error) {
legoRoute53Config := &legoroute53.Config{
Region: region,
MaxRetries: maxRetries,
TTL: TTL,
PropagationTimeout: propagationTimeout,
PollingInterval: pollingInterval,
AccessKeyID: accessKeyID,
SecretAccessKey: secretAccessKey,
}
return legoroute53.NewDNSProviderConfig(legoRoute53Config)
}
// Extract hiveutil (from the latest Hive image) into dir and return the executable's path
func extractHiveutil(oc *exutil.CLI, dir string) string {
latestImgTagStr := getLatestHiveVersion()
e2e.Logf("Extracting hiveutil from image %v (latest) ...", latestImgTagStr)
err := oc.
AsAdmin().
WithoutNamespace().
Run("image", "extract").
Args(fmt.Sprintf("quay.io/%s/hive:%s", HiveImgRepoOnQuay, latestImgTagStr), "--path", "/usr/bin/hiveutil:"+dir).
Execute()
o.Expect(err).NotTo(o.HaveOccurred())
hiveutilPath := dir + "/hiveutil"
e2e.Logf("Making hiveutil executable ...")
cmd := exec.Command("chmod", "+x", hiveutilPath)
_, err = cmd.CombinedOutput()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Making sure hiveutil is functional ...")
cmd = exec.Command(hiveutilPath)
out, err := cmd.CombinedOutput()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(string(out)).To(o.ContainSubstring("Available Commands"))
o.Expect(string(out)).To(o.ContainSubstring("awsprivatelink"))
return hiveutilPath
}
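// getNodeNames returns the names of the Nodes matching all of the given label selectors.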
func getNodeNames(oc *exutil.CLI, labels map[string]string) []string {
e2e.Logf("Extracting Node names")
args := []string{"node"}
for k, v := range labels {
args = append(args, fmt.Sprintf("--selector=%s=%s", k, v))
}
args = append(args, "-o=jsonpath={.items[*].metadata.name}")
stdout, _, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(args...).Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
nodeNames := strings.Split(stdout, " ")
e2e.Logf("Nodes extracted = %v", nodeNames)
return nodeNames
}
// machinePoolName is MachinePool.spec.name
func getMachinePoolInstancesIds(oc *exutil.CLI, machinePoolName string, kubeconfigPath string) []string {
// The command below does not error out if the selector does not have a match
stdout, _, err := oc.AsAdmin().WithoutNamespace().Run("get").
Args(
"machine",
fmt.Sprintf("--selector=machine.openshift.io/cluster-api-machine-role=%s", machinePoolName),
"-n", "openshift-machine-api", "-o=jsonpath={.items[*].status.providerStatus.instanceId}",
"--kubeconfig", kubeconfigPath,
).
Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
// When stdout is an empty string, strings.Split(stdout, " ") = []string{""}
// We do not want this, so return an empty slice
if len(stdout) == 0 {
return []string{}
}
return strings.Split(stdout, " ")
}
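// getBasedomain returns the base domain derived by stripping the leading (cluster-name) label
// from dns/cluster's spec.baseDomain.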
func getBasedomain(oc *exutil.CLI) string {
stdout, _, err := oc.
AsAdmin().
WithoutNamespace().
Run("get").
Args("dns/cluster", "-o=jsonpath={.spec.baseDomain}").
Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(stdout).To(o.ContainSubstring("."))
basedomain := stdout[strings.Index(stdout, ".")+1:]
e2e.Logf("Found base domain = %v", basedomain)
return basedomain
}
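// getRegion returns the region of the cluster under test; AWS, Azure, and GCP platforms are
// supported, any other platform fails the test.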
func getRegion(oc *exutil.CLI) string {
infrastructure, err := oc.
AdminConfigClient().
ConfigV1().
Infrastructures().
Get(context.Background(), "cluster", metav1.GetOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
var region string
switch platform := strings.ToLower(string(infrastructure.Status.PlatformStatus.Type)); platform {
case "aws":
region = infrastructure.Status.PlatformStatus.AWS.Region
case "azure":
region, _, err = oc.
AsAdmin().
WithoutNamespace().
Run("get").
Args("nodes", "-o=jsonpath={.items[0].metadata.labels['topology\\.kubernetes\\.io/region']}").
Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
case "gcp":
region = infrastructure.Status.PlatformStatus.GCP.Region
default:
e2e.Failf("Unknown platform: %s", platform)
}
e2e.Logf("Found region = %v", region)
return region
}
// Downloads the vCenter root certificates, then creates a Secret containing them.
func createVsphereCertsSecret(oc *exutil.CLI, ns, vCenter string) {
// Notes:
// 1) As we do not necessarily have access to the vCenter URL, we'd better run commands on the ephemeral cluster.
// 2) For some reason, /certs/download.zip might contain root certificates for a co-hosted (alias) domain.
// Provision will fail when this happens. As a result, we need to get an additional set of certificates
// with openssl, and merge those certificates into the ones obtained with wget.
// TODO: are the certificates obtained through openssl sufficient by themselves (probably yes)?
e2e.Logf("Getting certificates from the ephemeral cluster")
commands := fmt.Sprintf("yum install -y unzip && "+
"wget https://%v/certs/download.zip --no-check-certificate && "+
"unzip download.zip && "+
"cat certs/lin/*.0 && "+
"openssl s_client -host %v -port 443 -showcerts", vCenter, vCenter)
// No need to recover labels set on oc.Namespace()
err := exutil.SetNamespacePrivileged(oc, oc.Namespace())
o.Expect(err).NotTo(o.HaveOccurred())
// --to-namespace is required for the CI environment, otherwise
// the API server will throw a "namespace XXX not found" error.
stdout, _, err := oc.
AsAdmin().
WithoutNamespace().
Run("debug").
Args("--to-namespace", oc.Namespace(), "--", "bash", "-c", commands).
Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
re, err := regexp.Compile(pemX509CertPattern)
o.Expect(err).NotTo(o.HaveOccurred())
matches := re.FindAllStringSubmatch(stdout, -1)
var certsSlice []string
for _, match := range matches {
certsSlice = append(certsSlice, match[0])
}
certs := strings.Join(certsSlice, "\n")
e2e.Logf("Creating Secret containing root certificates of vCenter %v", vCenter)
certSecret := &corev1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: corev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: VSphereCerts,
Namespace: ns,
},
Type: corev1.SecretTypeOpaque,
StringData: map[string]string{
".cacert": certs,
},
}
_, err = oc.AdminKubeClient().CoreV1().Secrets(ns).Create(context.Background(), certSecret, metav1.CreateOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
}
/*
fReserve: when called, reserves an available IP for each domain from the hostedZoneName hosted zone.
IPs with the following properties can be reserved:
a) Be in the cidr block defined by cidrObj
b) Be in the IP range defined by minIp and maxIp
fRelease: when called, releases the IPs reserved in fReserve().
domain2Ip: maps domain to the IP reserved for it.
*/
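// Illustrative usage (hypothetical hosted zone and domain names, shown only as a sketch):
//   fReserve, fRelease, domain2Ip := getIps2ReserveFromAWSHostedZone(oc, "qe.devcluster.openshift.com",
//       cidrBlock, minIp, maxIp, nil, "", []string{"api.mycluster.qe.devcluster.openshift.com"})
//   fReserve()
//   defer fRelease()
//   e2e.Logf("Reserved IPs: %v", domain2Ip)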
func getIps2ReserveFromAWSHostedZone(oc *exutil.CLI, hostedZoneName string, cidrBlock *cidr.CIDR, minIp net.IP,
maxIp net.IP, unavailableIps []string, awsCredsFilePath string, domains2Reserve []string) (fReserve func(),
fRelease func(), domain2Ip map[string]string) {
// Route 53 is global so any region will do
var cfg aws.Config
if awsCredsFilePath == "" {
cfg = getAWSConfig(oc, AWSRegion)
} else {
cfg = getAWSConfig(oc, AWSRegion, awsCredsFilePath)
}
route53Client := route53.NewFromConfig(cfg)
// Get hosted zone ID
var hostedZoneId *string
listHostedZonesByNameOutput, err := route53Client.ListHostedZonesByName(context.Background(),
&route53.ListHostedZonesByNameInput{
DNSName: aws.String(hostedZoneName),
},
)
o.Expect(err).NotTo(o.HaveOccurred())
hostedZoneFound := false
for _, hostedZone := range listHostedZonesByNameOutput.HostedZones {
if strings.TrimSuffix(aws.ToString(hostedZone.Name), ".") == hostedZoneName {
hostedZoneFound = true
hostedZoneId = hostedZone.Id
break
}
}
o.Expect(hostedZoneFound).To(o.BeTrue())
e2e.Logf("Found hosted zone id = %v", aws.ToString(hostedZoneId))
// Get reserved IPs in cidr
reservedIps := sets.New[string](unavailableIps...)
listResourceRecordSetsPaginator := route53.NewListResourceRecordSetsPaginator(
route53Client,
&route53.ListResourceRecordSetsInput{
HostedZoneId: hostedZoneId,
},
)
for listResourceRecordSetsPaginator.HasMorePages() {
// Get a page of record sets
listResourceRecordSetsOutput, listResourceRecordSetsErr := listResourceRecordSetsPaginator.NextPage(context.Background())
o.Expect(listResourceRecordSetsErr).NotTo(o.HaveOccurred())
// Iterate records, mark IPs which belong to the cidr block as reservedIps
for _, recordSet := range listResourceRecordSetsOutput.ResourceRecordSets {
for _, resourceRecord := range recordSet.ResourceRecords {
if ip := aws.ToString(resourceRecord.Value); cidrBlock.Contains(ip) {
reservedIps.Insert(ip)
}
}
}
}
e2e.Logf("Found reserved IPs = %v", reservedIps.UnsortedList())
// Get available IPs in cidr which do not exceed the range defined by minIp and maxIp
var ips2Reserve []string
err = cidrBlock.EachFrom(minIp.String(), func(ip string) bool {
// Stop if IP exceeds maxIp or no more IPs to reserve
if cidr.IPCompare(net.ParseIP(ip), maxIp) == 1 || len(ips2Reserve) == len(domains2Reserve) {
return false
}
// Reserve available IP
if !reservedIps.Has(ip) {
ips2Reserve = append(ips2Reserve, ip)
}
return true
})
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(domains2Reserve)).To(o.Equal(len(ips2Reserve)), "Not enough available IPs to reserve")
e2e.Logf("IPs to reserve = %v", ips2Reserve)
e2e.Logf("Domains to reserve = %v", domains2Reserve)
// Get functions to reserve/release IPs
var recordSetChanges4Reservation, recordSetChanges4Release []types.Change
domain2Ip = make(map[string]string)
for i, domain2Reserve := range domains2Reserve {
ip2Reserve := ips2Reserve[i]
domain2Ip[domain2Reserve] = ip2Reserve
e2e.Logf("Will reserve IP %v for domain %v", ip2Reserve, domain2Reserve)
recordSetChanges4Reservation = append(recordSetChanges4Reservation, types.Change{
Action: types.ChangeActionCreate,
ResourceRecordSet: &types.ResourceRecordSet{
Name: aws.String(domain2Reserve),
Type: types.RRTypeA,
TTL: aws.Int64(60),
ResourceRecords: []types.ResourceRecord{{Value: aws.String(ip2Reserve)}},
},
})
recordSetChanges4Release = append(recordSetChanges4Release, types.Change{
Action: types.ChangeActionDelete,
ResourceRecordSet: &types.ResourceRecordSet{
Name: aws.String(domain2Reserve),
Type: types.RRTypeA,
TTL: aws.Int64(60),
ResourceRecords: []types.ResourceRecord{{Value: aws.String(ip2Reserve)}},
},
})
}
fReserve = func() {
e2e.Logf("Reserving IP addresses with domain to IP injection %v", domain2Ip)
_, reserveErr := route53Client.ChangeResourceRecordSets(
context.Background(),
&route53.ChangeResourceRecordSetsInput{
HostedZoneId: hostedZoneId,
ChangeBatch: &types.ChangeBatch{
Changes: recordSetChanges4Reservation,
},
},
)
o.Expect(reserveErr).NotTo(o.HaveOccurred())
}
fRelease = func() {
e2e.Logf("Releasing IP addresses for domains %v", domains2Reserve)
_, releaseErr := route53Client.ChangeResourceRecordSets(
context.Background(),
&route53.ChangeResourceRecordSetsInput{
HostedZoneId: hostedZoneId,
ChangeBatch: &types.ChangeBatch{
Changes: recordSetChanges4Release,
},
},
)
o.Expect(releaseErr).NotTo(o.HaveOccurred())
}
return
}
// getVSphereCIDR gets vSphere CIDR block, minimum and maximum IPs to be used for API/ingress VIPs.
func getVSphereCIDR(oc *exutil.CLI) (*cidr.CIDR, net.IP, net.IP) {
// Extracting machine network CIDR from install-config works for different network segments,
// including ci-vlan and devqe.
stdout, _, err := oc.
AsAdmin().
WithoutNamespace().
Run("extract").
Args("cm/cluster-config-v1", "-n", "kube-system", "--keys", "install-config", "--to", "-").
Outputs()
var ic minimalInstallConfig
o.Expect(err).NotTo(o.HaveOccurred())
err = yaml.Unmarshal([]byte(stdout), &ic)
o.Expect(err).NotTo(o.HaveOccurred())
machineNetwork := ic.Networking.MachineNetwork[0].CIDR
e2e.Logf("Found machine network segment = %v", machineNetwork)
cidrObj, err := cidr.Parse(machineNetwork)
o.Expect(err).NotTo(o.HaveOccurred())
// We need another (temporary) CIDR object which will change with begin.
cidrObjTemp, err := cidr.Parse(machineNetwork)
o.Expect(err).NotTo(o.HaveOccurred())
begin, end := cidrObjTemp.IPRange()
// The first 2 IPs should not be used
// The next 2 IPs are reserved for the Hive cluster
// We thus skip the first 4 IPs
minIpOffset := 4
for i := 0; i < minIpOffset; i++ {
cidr.IPIncr(begin)
}
e2e.Logf("Min IP = %v, max IP = %v", begin, end)
return cidrObj, begin, end
}
// getVMInternalIPs gets private IPs of cloud VMs
func getVMInternalIPs(oc *exutil.CLI) []string {
stdout, _, err := oc.
AsAdmin().
WithoutNamespace().
Run("get").
Args("node", "-o=jsonpath={.items[*].status.addresses[?(@.type==\"InternalIP\")].address}").
Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
return strings.Fields(stdout)
}
// Get the environment in which the test runs
func getTestEnv() (tEnv testEnv) {
if val, ok := os.LookupEnv("OPENSHIFT_CI"); ok && val == "true" {
tEnv = testEnvCI
} else if _, ok := os.LookupEnv("JENKINS_HOME"); ok {
tEnv = testEnvJenkins
} else {
tEnv = testEnvLocal
}
return
}
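// getAWSCredsFilePath4VSphere returns the path of the AWS credentials file used for DNS setup in
// vSphere tests, depending on the test environment (CI, Jenkins, or local).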
func getAWSCredsFilePath4VSphere(tEnv testEnv) (credsFilePath string) {
switch tEnv {
case testEnvCI:
credsFilePath = VSphereAWSCredsFilePathCI
case testEnvJenkins:
e2e.Failf(`
VSphere test cases are meant to be tested locally (instead of on Jenkins).
In fact, an additional set of AWS credentials is required for DNS setup,
and those credentials are loaded using external AWS configurations (which
are only available locally) when running in non-CI environments.`)
case testEnvLocal:
// Credentials will be retrieved from external configurations using AWS tool chains when running locally.
credsFilePath = ""
default:
e2e.Failf("Unknown test environment")
}
return credsFilePath
}
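// createAssumeRolePolicyDocument builds an IAM assume-role policy document for the given principal
// ARN; when uuid is non-empty, an sts:ExternalId condition is added to the statement.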
func createAssumeRolePolicyDocument(principalARN, uuid string) (string, error) {
policy := map[string]interface{}{
"Version": "2012-10-17",
"Statement": []map[string]interface{}{
{
"Effect": "Allow",
"Principal": map[string]string{
"AWS": principalARN,
},
"Action": "sts:AssumeRole",
},
},
}
if uuid != "" {
policyStatements := policy["Statement"].([]map[string]interface{})
policyStatements[0]["Condition"] = map[string]interface{}{
"StringEquals": map[string]string{
"sts:ExternalId": uuid,
},
}
}
policyJSON, err := json.MarshalIndent(policy, "", " ")
if err != nil {
return "", fmt.Errorf("failed to marshal policy: %v", err)
}
return string(policyJSON), nil
}
// Check if MCE is enabled
func isMCEEnabled(oc *exutil.CLI) bool {
e2e.Logf("Checking if MCE is enabled in the cluster")
checkMCEOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("MultiClusterEngine", "multiclusterengine-sample").Output()
if err != nil {
if strings.Contains(checkMCEOutput, "the server doesn't have a resource type \"MultiClusterEngine\"") {
return false
} else {
e2e.Failf("Failed to check if MCE is enabled in the cluster: %v", err)
}
}
return strings.Contains(checkMCEOutput, "multiclusterengine-sample")
}
// Get the latest Hive Version at HiveImgRepoOnQuay
func getLatestHiveVersion() string {
e2e.Logf("Getting tag of the latest Hive image")
cmd := exec.Command(
"bash",
"-c",
fmt.Sprintf("curl -sk https://quay.io/api/v1/repository/%s/hive/tag/ "+
"| jq '.tags | sort_by(.start_ts) | reverse | .[0].name'", HiveImgRepoOnQuay),
)
latestImgTag, err := cmd.CombinedOutput()
o.Expect(err).NotTo(o.HaveOccurred())
latestImgTagStr := strings.Trim(strings.TrimSuffix(string(latestImgTag), "\n"), "\"")
e2e.Logf("The latest Hive image version is %v ", latestImgTagStr)
return latestImgTagStr
}
| package hive | ||||
function | openshift/openshift-tests-private | 6431f4a0-e789-4a4b-8ab7-cc7d7f1b62ca | applyResourceFromTemplate | ['"encoding/json"', '"os"', '"time"', '"github.com/aws/aws-sdk-go-v2/config"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func applyResourceFromTemplate(oc *exutil.CLI, parameters ...string) error {
var cfgFileJSON string
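// Render the template with "oc process" (retrying on failure), write the result to a temporary
// file, then apply that file with "oc apply".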
err := wait.Poll(3*time.Second, 15*time.Second, func() (bool, error) {
output, err := oc.AsAdmin().Run("process").Args(parameters...).OutputToFile(getRandomString() + "-hive-resource-cfg.json")
if err != nil {
e2e.Logf("the err:%v, and try next round", err)
return false, nil
}
cfgFileJSON = output
return true, nil
})
exutil.AssertWaitPollNoErr(err, "fail to create config file")
e2e.Logf("the file of resource is %s", cfgFileJSON)
defer os.Remove(cfgFileJSON)
return oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", cfgFileJSON).Execute()
} | hive | ||||
function | openshift/openshift-tests-private | a689f444-c936-439c-9276-c9647b7e8246 | getRandomString | ['"math/rand"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func getRandomString() string {
chars := "abcdefghijklmnopqrstuvwxyz0123456789"
seed := rand.New(rand.NewSource(time.Now().UnixNano()))
buffer := make([]byte, 8)
for index := range buffer {
buffer[index] = chars[seed.Intn(len(chars))]
}
return string(buffer)
} | hive | ||||
function | openshift/openshift-tests-private | a94c49e6-0fc2-4391-a13b-7cb5a207221f | GetEmail | ['legoUser'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func (u *legoUser) GetEmail() string {
return u.Email
} | hive | ||||
function | openshift/openshift-tests-private | c698ac9a-ed6d-4afe-9666-891952cde9eb | GetRegistration | ['"github.com/go-acme/lego/v4/registration"'] | ['legoUser'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func (u *legoUser) GetRegistration() *registration.Resource {
return u.Registration
} | hive | |||
function | openshift/openshift-tests-private | d4692bae-f37e-4c38-a626-e0948056c5dd | GetPrivateKey | ['"crypto"'] | ['legoUser'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func (u *legoUser) GetPrivateKey() crypto.PrivateKey {
return u.key
} | hive | |||
function | openshift/openshift-tests-private | ecad1c32-e12a-402c-a775-fb9730a8e20b | create | ['"strconv"'] | ['clusterMonitoringConfig'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func (cmc *clusterMonitoringConfig) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", cmc.template, "-p", "ENABLEUSERWORKLOAD="+strconv.FormatBool(cmc.enableUserWorkload), "NAMESPACE="+cmc.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
} | hive | |||
function | openshift/openshift-tests-private | 071081f2-ff90-4426-a536-a9c8c7f7fdc5 | createIfNotExist | ['hiveNameSpace'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func (ns *hiveNameSpace) createIfNotExist(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", ns.template, "-p", "NAME="+ns.name)
o.Expect(err).NotTo(o.HaveOccurred())
} | hive | ||||
function | openshift/openshift-tests-private | 5b86e836-7c7f-4aed-882c-f5147cdc3d30 | createIfNotExist | ['operatorGroup'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func (og *operatorGroup) createIfNotExist(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", og.template, "-p", "NAME="+og.name, "NAMESPACE="+og.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
} | hive | ||||
function | openshift/openshift-tests-private | 322d13a7-79fb-4f5a-8c4c-1f6938ee3d3e | create | ['"strings"'] | ['subscription'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func (sub *subscription) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", sub.template, "-p", "NAME="+sub.name, "NAMESPACE="+sub.namespace, "CHANNEL="+sub.channel,
"APPROVAL="+sub.approval, "OPERATORNAME="+sub.operatorName, "SOURCENAME="+sub.sourceName, "SOURCENAMESPACE="+sub.sourceNamespace, "STARTINGCSV="+sub.startingCSV)
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Compare(sub.approval, "Automatic") == 0 {
sub.findInstalledCSV(oc)
} else {
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "UpgradePending", ok, DefaultTimeout, []string{"sub", sub.name, "-n", sub.namespace, "-o=jsonpath={.status.state}"}).check(oc)
}
} | hive | |||
function | openshift/openshift-tests-private | cc6f80a2-a6a0-4b27-b316-da8e499b81da | createIfNotExist | ['"encoding/json"', '"strings"', '"k8s.io/apimachinery/pkg/util/wait"'] | ['subscription'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func (sub *subscription) createIfNotExist(oc *exutil.CLI) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", "-n", sub.namespace).Output()
if strings.Contains(output, "NotFound") || strings.Contains(output, "No resources") || err != nil {
e2e.Logf("No hive subscription, Create it.")
err = applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", sub.template, "-p", "NAME="+sub.name, "NAMESPACE="+sub.namespace, "CHANNEL="+sub.channel,
"APPROVAL="+sub.approval, "OPERATORNAME="+sub.operatorName, "SOURCENAME="+sub.sourceName, "SOURCENAMESPACE="+sub.sourceNamespace, "STARTINGCSV="+sub.startingCSV)
o.Expect(err).NotTo(o.HaveOccurred())
if strings.Compare(sub.approval, "Automatic") == 0 {
sub.findInstalledCSV(oc)
} else {
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "UpgradePending", ok, DefaultTimeout, []string{"sub", sub.name, "-n", sub.namespace, "-o=jsonpath={.status.state}"}).check(oc)
}
//wait for pod running
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "Running", ok, DefaultTimeout, []string{"pod", "--selector=control-plane=hive-operator", "-n",
sub.namespace, "-o=jsonpath={.items[0].status.phase}"}).check(oc)
// Check whether the deployed Hive image needs to be replaced with the latest one
hiveDeployedImg, _, err := oc.
AsAdmin().
WithoutNamespace().
Run("get").
Args("csv", sub.installedCSV, "-n", sub.namespace,
"-o", "jsonpath={.spec.install.spec.deployments[0].spec.template.spec.containers[0].image}").
Outputs()
if err != nil {
e2e.Logf("Failed to get Hive image: %v", err)
} else {
e2e.Logf("Found Hive deployed image = %v", hiveDeployedImg)
latestHiveVer := getLatestHiveVersion()
if strings.Contains(hiveDeployedImg, latestHiveVer) {
e2e.Logf("The deployed Hive image is already the lastest.")
} else {
e2e.Logf("The deployed Hive image is NOT the lastest, patched to the latest version: %v", latestHiveVer)
patchYaml := `[{"op": "replace", "path": "/spec/install/spec/deployments/0/spec/template/spec/containers/0/image", "value": "quay.io/app-sre/hive:versiontobepatched"}]`
patchYaml = strings.Replace(patchYaml, "versiontobepatched", latestHiveVer, 1)
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", sub.namespace, "csv", sub.installedCSV, "--type=json", "-p", patchYaml).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
}
} else {
e2e.Logf("hive subscription already exists.")
}
} | hive | |||
function | openshift/openshift-tests-private | 6aae21c1-d161-463f-823a-fef6d70b0a10 | findInstalledCSV | ['"strings"'] | ['subscription'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func (sub *subscription) findInstalledCSV(oc *exutil.CLI) {
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "AtLatestKnown", ok, DefaultTimeout, []string{"sub", sub.name, "-n", sub.namespace, "-o=jsonpath={.status.state}"}).check(oc)
installedCSV := getResource(oc, asAdmin, withoutNamespace, "sub", sub.name, "-n", sub.namespace, "-o=jsonpath={.status.installedCSV}")
o.Expect(installedCSV).NotTo(o.BeEmpty())
if strings.Compare(sub.installedCSV, installedCSV) != 0 {
sub.installedCSV = installedCSV
}
e2e.Logf("the installed CSV name is %s", sub.installedCSV)
} | hive | |||
function | openshift/openshift-tests-private | 0f5950c4-8b51-450f-943d-dbdfb77435df | create | ['hiveconfig'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func (hc *hiveconfig) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", hc.template, "-p", "LOGLEVEL="+hc.logLevel, "TARGETNAMESPACE="+hc.targetNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
} | hive | ||||
function | openshift/openshift-tests-private | bfec4a99-a6a7-4fd6-a4ca-97bd446e37d5 | createIfNotExist | ['"strings"', '"k8s.io/apimachinery/pkg/util/wait"'] | ['hiveconfig'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func (hc *hiveconfig) createIfNotExist(oc *exutil.CLI) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("HiveConfig", "hive").Output()
if strings.Contains(output, "have a resource type") || err != nil {
e2e.Logf("No hivconfig, Create it.")
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", hc.template, "-p", "LOGLEVEL="+hc.logLevel, "TARGETNAMESPACE="+hc.targetNamespace)
o.Expect(err).NotTo(o.HaveOccurred())
//wait for pods running
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "hive-clustersync", ok, WaitingForClusterOperatorsTimeout, []string{"pod", "--selector=control-plane=clustersync",
"-n", HiveNamespace, "-o=jsonpath={.items[*].metadata.name}"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "Running", ok, DefaultTimeout, []string{"pod", "--selector=control-plane=clustersync", "-n",
HiveNamespace, "-o=jsonpath={.items[0].status.phase}"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "hive-controllers", ok, DefaultTimeout, []string{"pod", "--selector=control-plane=controller-manager",
"-n", HiveNamespace, "-o=jsonpath={.items[*].metadata.name}"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "Running", ok, DefaultTimeout, []string{"pod", "--selector=control-plane=controller-manager", "-n",
HiveNamespace, "-o=jsonpath={.items[0].status.phase}"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "hiveadmission", ok, DefaultTimeout, []string{"pod", "--selector=app=hiveadmission",
"-n", HiveNamespace, "-o=jsonpath={.items[*].metadata.name}"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, compare, "Running Running", ok, DefaultTimeout, []string{"pod", "--selector=app=hiveadmission", "-n",
HiveNamespace, "-o=jsonpath={.items[*].status.phase}"}).check(oc)
} else {
e2e.Logf("hivconfig already exists.")
}
} | hive | |||
function | openshift/openshift-tests-private | a3d265d3-20b7-4558-a328-6e49b83a8798 | create | ['clusterImageSet'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func (imageset *clusterImageSet) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", imageset.template, "-p", "NAME="+imageset.name, "RELEASEIMAGE="+imageset.releaseImage)
o.Expect(err).NotTo(o.HaveOccurred())
} | hive | ||||
function | openshift/openshift-tests-private | 0fa7d2ce-20df-4f9e-aae5-1ecaf19cdff7 | create | ['"strconv"'] | ['clusterPool'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func (pool *clusterPool) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", pool.template, "-p", "NAME="+pool.name, "NAMESPACE="+pool.namespace, "FAKE="+pool.fake, "BASEDOMAIN="+pool.baseDomain, "IMAGESETREF="+pool.imageSetRef, "PLATFORMTYPE="+pool.platformType, "CREDREF="+pool.credRef, "REGION="+pool.region, "PULLSECRETREF="+pool.pullSecretRef, "SIZE="+strconv.Itoa(pool.size), "MAXSIZE="+strconv.Itoa(pool.maxSize), "RUNNINGCOUNT="+strconv.Itoa(pool.runningCount), "MAXCONCURRENT="+strconv.Itoa(pool.maxConcurrent), "HIBERNATEAFTER="+pool.hibernateAfter)
o.Expect(err).NotTo(o.HaveOccurred())
} | hive | |||
function | openshift/openshift-tests-private | 30240634-9f43-4363-8648-c4c91e27819b | create | ['clusterClaim'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func (claim *clusterClaim) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", claim.template, "-p", "NAME="+claim.name, "NAMESPACE="+claim.namespace, "CLUSTERPOOLNAME="+claim.clusterPoolName)
o.Expect(err).NotTo(o.HaveOccurred())
} | hive | ||||
function | openshift/openshift-tests-private | 1e2c4ce0-8540-4990-8b2c-8dd35ebe9551 | create | ['"github.com/aws/aws-sdk-go-v2/config"'] | ['installConfig'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func (config *installConfig) create(oc *exutil.CLI) {
// Set default values
if config.publish == "" {
config.publish = PublishExternal
}
if config.vmType == "" {
config.vmType = AWSVmTypeAMD64
}
if config.arch == "" {
config.arch = archAMD64
}
parameters := []string{"--ignore-unknown-parameters=true", "-f", config.template, "-p", "NAME1=" + config.name1, "NAMESPACE=" + config.namespace, "BASEDOMAIN=" + config.baseDomain, "NAME2=" + config.name2, "REGION=" + config.region, "PUBLISH=" + config.publish, "VMTYPE=" + config.vmType, "ARCH=" + config.arch}
if len(config.credentialsMode) > 0 {
parameters = append(parameters, "CREDENTIALSMODE="+config.credentialsMode)
}
if len(config.internalJoinSubnet) == 0 {
parameters = append(parameters, "INTERNALJOINSUBNET="+defaultAWSInternalJoinSubnet)
} else {
parameters = append(parameters, "INTERNALJOINSUBNET="+config.internalJoinSubnet)
}
err := applyResourceFromTemplate(oc, parameters...)
o.Expect(err).NotTo(o.HaveOccurred())
} | hive | |||
function | openshift/openshift-tests-private | 02880961-90d3-4576-95d9-21964cae0b58 | create | ['"github.com/aws/aws-sdk-go-v2/config"'] | ['installConfigPrivateLink'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func (config *installConfigPrivateLink) create(oc *exutil.CLI) {
// Set default values
if config.publish == "" {
config.publish = PublishExternal
}
if config.vmType == "" {
config.vmType = AWSVmTypeAMD64
}
if config.arch == "" {
config.arch = archAMD64
}
parameters := []string{"--ignore-unknown-parameters=true", "-f", config.template, "-p", "NAME1=" + config.name1, "NAMESPACE=" + config.namespace, "BASEDOMAIN=" + config.baseDomain, "NAME2=" + config.name2, "REGION=" + config.region, "PUBLISH=" + config.publish, "VMTYPE=" + config.vmType, "ARCH=" + config.arch}
if len(config.credentialsMode) > 0 {
parameters = append(parameters, "CREDENTIALSMODE="+config.credentialsMode)
}
if len(config.internalJoinSubnet) == 0 {
parameters = append(parameters, "INTERNALJOINSUBNET="+defaultAWSInternalJoinSubnet)
} else {
parameters = append(parameters, "INTERNALJOINSUBNET="+config.internalJoinSubnet)
}
if len(config.privateSubnetId1) > 0 {
parameters = append(parameters, "PRIVATESUBNETID1="+config.privateSubnetId1)
}
if len(config.privateSubnetId2) > 0 {
parameters = append(parameters, "PRIVATESUBNETID2="+config.privateSubnetId2)
}
if len(config.privateSubnetId3) > 0 {
parameters = append(parameters, "PRIVATESUBNETID3="+config.privateSubnetId3)
}
if len(config.machineNetworkCidr) == 0 {
parameters = append(parameters, "MACHINENETWORKCIDR="+defaultAWSMachineNetworkCidr)
} else {
parameters = append(parameters, "MACHINENETWORKCIDR="+config.machineNetworkCidr)
}
err := applyResourceFromTemplate(oc, parameters...)
o.Expect(err).NotTo(o.HaveOccurred())
} | hive | |||
function | openshift/openshift-tests-private | ddfca771-6f4c-4e4a-b5ff-e4cf85f6f1d0 | create | ['"strconv"'] | ['clusterDeployment'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func (cluster *clusterDeployment) create(oc *exutil.CLI) {
parameters := []string{"--ignore-unknown-parameters=true", "-f", cluster.template, "-p", "FAKE=" + cluster.fake, "NAME=" + cluster.name, "NAMESPACE=" + cluster.namespace, "BASEDOMAIN=" + cluster.baseDomain, "CLUSTERNAME=" + cluster.clusterName, "MANAGEDNS=" + strconv.FormatBool(cluster.manageDNS), "PLATFORMTYPE=" + cluster.platformType, "CREDREF=" + cluster.credRef, "REGION=" + cluster.region, "IMAGESETREF=" + cluster.imageSetRef, "INSTALLCONFIGSECRET=" + cluster.installConfigSecret, "PULLSECRETREF=" + cluster.pullSecretRef, "INSTALLATTEMPTSLIMIT=" + strconv.Itoa(cluster.installAttemptsLimit)}
if len(cluster.installerType) > 0 {
parameters = append(parameters, "INSTALLERTYPE="+cluster.installerType)
} else {
parameters = append(parameters, "INSTALLERTYPE=installer")
}
if len(cluster.customizedTag) > 0 {
parameters = append(parameters, "CUSTOMIZEDTAG="+cluster.customizedTag)
} else {
parameters = append(parameters, "CUSTOMIZEDTAG="+AWSDefaultCDTag)
}
err := applyResourceFromTemplate(oc, parameters...)
o.Expect(err).NotTo(o.HaveOccurred())
} | hive | |||
function | openshift/openshift-tests-private | 468162de-cbe1-4dc0-9698-b88b944e53d9 | create | ['"strconv"'] | ['clusterDeploymentAssumeRole'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func (cluster *clusterDeploymentAssumeRole) create(oc *exutil.CLI) {
parameters := []string{"--ignore-unknown-parameters=true", "-f", cluster.template, "-p", "FAKE=" + cluster.fake, "INSTALLERTYPE=" + cluster.installerType, "NAME=" + cluster.name, "NAMESPACE=" + cluster.namespace, "BASEDOMAIN=" + cluster.baseDomain, "BOUND_SERVICE_ACCOUNT_SIGNING_KEY_SECRET_REF=" + cluster.boundServiceAccountSigningKeySecretRef, "ROLEARN=" + cluster.roleARN, "EXTERNALID=" + cluster.externalID, "CLUSTERNAME=" + cluster.clusterName, "MANAGEDNS=" + strconv.FormatBool(cluster.manageDNS), "PLATFORMTYPE=" + cluster.platformType, "REGION=" + cluster.region, "MANIFESTS_SECRET_REF=" + cluster.manifestsSecretRef, "IMAGESETREF=" + cluster.imageSetRef, "INSTALLCONFIGSECRET=" + cluster.installConfigSecret, "PULLSECRETREF=" + cluster.pullSecretRef, "INSTALLATTEMPTSLIMIT=" + strconv.Itoa(cluster.installAttemptsLimit)}
err := applyResourceFromTemplate(oc, parameters...)
o.Expect(err).NotTo(o.HaveOccurred())
} | hive | |||
function | openshift/openshift-tests-private | e4b93e85-01e7-4a05-b629-af72ea82312a | create | ['"strconv"'] | ['clusterDeploymentAdopt'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func (cluster *clusterDeploymentAdopt) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", cluster.template, "-p", "NAME="+cluster.name, "NAMESPACE="+cluster.namespace, "BASEDOMAIN="+cluster.baseDomain, "ADMINKUBECONFIGREF="+cluster.adminKubeconfigRef, "CLUSTERID="+cluster.clusterID, "INFRAID="+cluster.infraID, "CLUSTERNAME="+cluster.clusterName, "MANAGEDNS="+strconv.FormatBool(cluster.manageDNS), "PLATFORMTYPE="+cluster.platformType, "CREDREF="+cluster.credRef, "REGION="+cluster.region, "PULLSECRETREF="+cluster.pullSecretRef, "PRESERVEONDELETE="+strconv.FormatBool(cluster.preserveOnDelete))
o.Expect(err).NotTo(o.HaveOccurred())
} | hive | |||
function | openshift/openshift-tests-private | 981e567d-a083-4a8d-920b-0619213495b5 | create | ['"strconv"'] | ['clusterDeploymentPrivateLink'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func (cluster *clusterDeploymentPrivateLink) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", cluster.template, "-p", "FAKE="+cluster.fake, "NAME="+cluster.name, "NAMESPACE="+cluster.namespace, "BASEDOMAIN="+cluster.baseDomain, "CLUSTERNAME="+cluster.clusterName, "MANAGEDNS="+strconv.FormatBool(cluster.manageDNS), "CREDREF="+cluster.credRef, "REGION="+cluster.region, "IMAGESETREF="+cluster.imageSetRef, "INSTALLCONFIGSECRET="+cluster.installConfigSecret, "PULLSECRETREF="+cluster.pullSecretRef, "INSTALLATTEMPTSLIMIT="+strconv.Itoa(cluster.installAttemptsLimit))
o.Expect(err).NotTo(o.HaveOccurred())
} | hive | |||
function | openshift/openshift-tests-private | 9cf1b09f-3f52-436f-8073-eefccbe3ca57 | create | ['"strconv"'] | ['machinepool'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func (machine *machinepool) create(oc *exutil.CLI) {
// Set default values
if machine.gcpSecureBoot == "" {
machine.gcpSecureBoot = "Disabled"
}
if machine.customizedTag == "" {
machine.customizedTag = AWSDefaultMPTag
}
parameters := []string{"--ignore-unknown-parameters=true", "-f", machine.template, "-p", "CLUSTERNAME=" + machine.clusterName, "NAMESPACE=" + machine.namespace, "IOPS=" + strconv.Itoa(machine.iops), "AUTHENTICATION=" + machine.authentication, "SECUREBOOT=" + machine.gcpSecureBoot, "CUSTOMIZEDTAG=" + machine.customizedTag}
if len(machine.networkProjectID) > 0 {
parameters = append(parameters, "NETWORKPROJECTID="+machine.networkProjectID)
}
err := applyResourceFromTemplate(oc, parameters...)
o.Expect(err).NotTo(o.HaveOccurred())
} | hive | |||
function | openshift/openshift-tests-private | 02cec1c8-ea6a-4875-ae29-d21a7a6ac629 | create | ['syncSetResource'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func (syncresource *syncSetResource) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", syncresource.template, "-p", "NAME="+syncresource.name, "NAMESPACE="+syncresource.namespace, "CDREFNAME="+syncresource.cdrefname, "NAMESPACE2="+syncresource.namespace2, "RAMODE="+syncresource.ramode, "APPLYBEHAVIOR="+syncresource.applybehavior, "CMNAME="+syncresource.cmname, "CMNAMESPACE="+syncresource.cmnamespace)
o.Expect(err).NotTo(o.HaveOccurred())
} | hive | ||||
function | openshift/openshift-tests-private | a8452e87-63db-4272-99ee-555c9c2d4b02 | create | ['syncSetPatch'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func (syncpatch *syncSetPatch) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", syncpatch.template, "-p", "NAME="+syncpatch.name, "NAMESPACE="+syncpatch.namespace, "CDREFNAME="+syncpatch.cdrefname, "CMNAME="+syncpatch.cmname, "CMNAMESPACE="+syncpatch.cmnamespace, "PCONTENT="+syncpatch.pcontent, "PATCHTYPE="+syncpatch.patchType)
o.Expect(err).NotTo(o.HaveOccurred())
} | hive | ||||
function | openshift/openshift-tests-private | 14adcbb4-7702-48c0-9a44-a74f27e84e76 | create | ['syncSetSecret'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func (syncsecret *syncSetSecret) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", syncsecret.template, "-p", "NAME="+syncsecret.name, "NAMESPACE="+syncsecret.namespace, "CDREFNAME="+syncsecret.cdrefname, "SNAME="+syncsecret.sname, "SNAMESPACE="+syncsecret.snamespace, "TNAME="+syncsecret.tname, "TNAMESPACE="+syncsecret.tnamespace)
o.Expect(err).NotTo(o.HaveOccurred())
} | hive | ||||
function | openshift/openshift-tests-private | ddeab446-578b-41ac-8046-2c51b60c3527 | create | ['"github.com/aws/aws-sdk-go-v2/config"'] | ['azureInstallConfig'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func (config *azureInstallConfig) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", config.template, "-p", "NAME1="+config.name1, "NAMESPACE="+config.namespace, "BASEDOMAIN="+config.baseDomain, "NAME2="+config.name2, "RESGROUP="+config.resGroup, "AZURETYPE="+config.azureType, "REGION="+config.region)
o.Expect(err).NotTo(o.HaveOccurred())
} | hive | |||
function | openshift/openshift-tests-private | 08c997f6-89cb-4823-852a-30703a797973 | create | ['azureClusterDeployment'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func (cluster *azureClusterDeployment) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", cluster.template, "-p", "FAKE="+cluster.fake, "COPYCLIDOMAIN="+cluster.copyCliDomain, "NAME="+cluster.name, "NAMESPACE="+cluster.namespace, "BASEDOMAIN="+cluster.baseDomain, "CLUSTERNAME="+cluster.clusterName, "PLATFORMTYPE="+cluster.platformType, "CREDREF="+cluster.credRef, "REGION="+cluster.region, "RESGROUP="+cluster.resGroup, "AZURETYPE="+cluster.azureType, "IMAGESETREF="+cluster.imageSetRef, "INSTALLCONFIGSECRET="+cluster.installConfigSecret, "INSTALLERIMAGEOVERRIDE="+cluster.installerImageOverride, "PULLSECRETREF="+cluster.pullSecretRef)
o.Expect(err).NotTo(o.HaveOccurred())
} | hive | ||||
function | openshift/openshift-tests-private | f70640d7-1fc8-43c5-9b7a-2662b03d9907 | create | ['"strconv"'] | ['azureClusterPool'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func (pool *azureClusterPool) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", pool.template, "-p", "NAME="+pool.name, "NAMESPACE="+pool.namespace, "FAKE="+pool.fake, "BASEDOMAIN="+pool.baseDomain, "IMAGESETREF="+pool.imageSetRef, "PLATFORMTYPE="+pool.platformType, "CREDREF="+pool.credRef, "REGION="+pool.region, "RESGROUP="+pool.resGroup, "PULLSECRETREF="+pool.pullSecretRef, "SIZE="+strconv.Itoa(pool.size), "MAXSIZE="+strconv.Itoa(pool.maxSize), "RUNNINGCOUNT="+strconv.Itoa(pool.runningCount), "MAXCONCURRENT="+strconv.Itoa(pool.maxConcurrent), "HIBERNATEAFTER="+pool.hibernateAfter)
o.Expect(err).NotTo(o.HaveOccurred())
} | hive | |||
function | openshift/openshift-tests-private | 5904e296-6b90-453f-8584-739d2c455a41 | create | ['"github.com/aws/aws-sdk-go-v2/config"'] | ['gcpInstallConfig'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func (config *gcpInstallConfig) create(oc *exutil.CLI) {
// Set default values
if config.secureBoot == "" {
config.secureBoot = "Disabled"
}
parameters := []string{"--ignore-unknown-parameters=true", "-f", config.template, "-p", "NAME1=" + config.name1, "NAMESPACE=" + config.namespace, "BASEDOMAIN=" + config.baseDomain, "NAME2=" + config.name2, "REGION=" + config.region, "PROJECTID=" + config.projectid, "SECUREBOOT=" + config.secureBoot}
if len(config.computeSubnet) > 0 {
parameters = append(parameters, "COMPUTESUBNET="+config.computeSubnet)
}
if len(config.controlPlaneSubnet) > 0 {
parameters = append(parameters, "CONTROLPLANESUBNET="+config.controlPlaneSubnet)
}
if len(config.network) > 0 {
parameters = append(parameters, "NETWORK="+config.network)
}
if len(config.networkProjectId) > 0 {
parameters = append(parameters, "NETWORKPROJECTID="+config.networkProjectId)
}
err := applyResourceFromTemplate(oc, parameters...)
o.Expect(err).NotTo(o.HaveOccurred())
} | hive | |||
function | openshift/openshift-tests-private | 9cdf7fa2-80de-4ffc-8804-8035cb1b785c | create | ['"strconv"'] | ['gcpClusterDeployment'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func (cluster *gcpClusterDeployment) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", cluster.template, "-p", "FAKE="+cluster.fake, "NAME="+cluster.name, "NAMESPACE="+cluster.namespace, "BASEDOMAIN="+cluster.baseDomain, "CLUSTERNAME="+cluster.clusterName, "PLATFORMTYPE="+cluster.platformType, "CREDREF="+cluster.credRef, "REGION="+cluster.region, "IMAGESETREF="+cluster.imageSetRef, "INSTALLCONFIGSECRET="+cluster.installConfigSecret, "PULLSECRETREF="+cluster.pullSecretRef, "INSTALLERIMAGEOVERRIDE="+cluster.installerImageOverride, "INSTALLATTEMPTSLIMIT="+strconv.Itoa(cluster.installAttemptsLimit))
o.Expect(err).NotTo(o.HaveOccurred())
} | hive | |||
function | openshift/openshift-tests-private | f4d6be35-e351-4c5b-9a94-36bffd426ded | create | ['"strconv"'] | ['gcpClusterPool'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func (pool *gcpClusterPool) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", pool.template, "-p", "NAME="+pool.name, "NAMESPACE="+pool.namespace, "FAKE="+pool.fake, "BASEDOMAIN="+pool.baseDomain, "IMAGESETREF="+pool.imageSetRef, "PLATFORMTYPE="+pool.platformType, "CREDREF="+pool.credRef, "REGION="+pool.region, "PULLSECRETREF="+pool.pullSecretRef, "SIZE="+strconv.Itoa(pool.size), "MAXSIZE="+strconv.Itoa(pool.maxSize), "RUNNINGCOUNT="+strconv.Itoa(pool.runningCount), "MAXCONCURRENT="+strconv.Itoa(pool.maxConcurrent), "HIBERNATEAFTER="+pool.hibernateAfter)
o.Expect(err).NotTo(o.HaveOccurred())
} | hive | |||
function | openshift/openshift-tests-private | aebd96da-1f19-40e7-b95a-5234a308619d | create | ['vSphereInstallConfig'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func (ic *vSphereInstallConfig) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", ic.template,
"-p", "SECRETNAME="+ic.secretName, "SECRETNS="+ic.secretNs, "BASEDOMAIN="+ic.baseDomain,
"ICNAME="+ic.icName, "MACHINENETWORK="+ic.machineNetwork, "APIVIP="+ic.apiVip, "CLUSTER="+ic.cluster,
"DATACENTER="+ic.datacenter, "DATASTORE="+ic.datastore, "INGRESSVIP="+ic.ingressVip, "NETWORK="+ic.network,
"PASSWORD="+ic.password, "USERNAME="+ic.username, "VCENTER="+ic.vCenter)
o.Expect(err).NotTo(o.HaveOccurred())
} | hive | ||||
function | openshift/openshift-tests-private | 5fcd156d-fae6-4877-bba7-913e44513ed1 | create | ['"strconv"'] | ['vSphereClusterDeployment'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func (cluster *vSphereClusterDeployment) create(oc *exutil.CLI) {
err := applyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", cluster.template,
"-p", "FAKE="+strconv.FormatBool(cluster.fake), "NAME="+cluster.name, "NAMESPACE="+cluster.namespace,
"BASEDOMAIN="+cluster.baseDomain, "MANAGEDNS="+strconv.FormatBool(cluster.manageDns),
"CLUSTERNAME="+cluster.clusterName, "CERTREF="+cluster.certRef, "CLUSTER="+cluster.cluster,
"CREDREF="+cluster.credRef, "DATACENTER="+cluster.datacenter, "DATASTORE="+cluster.datastore,
"NETWORK="+cluster.network, "VCENTER="+cluster.vCenter, "IMAGESETREF="+cluster.imageSetRef,
"INSTALLCONFIGSECRET="+cluster.installConfigSecret, "PULLSECRETREF="+cluster.pullSecretRef,
"INSTALLATTEMPTSLIMIT="+strconv.Itoa(cluster.installAttemptsLimit))
o.Expect(err).NotTo(o.HaveOccurred())
} | hive | |||
function | openshift/openshift-tests-private | f8b7ac33-1776-4782-8d28-389f2fcda3e0 | getResource | ['"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func getResource(oc *exutil.CLI, asAdmin bool, withoutNamespace bool, parameters ...string) string {
var result string
err := wait.Poll(3*time.Second, 120*time.Second, func() (bool, error) {
output, err := doAction(oc, "get", asAdmin, withoutNamespace, parameters...)
if err != nil {
e2e.Logf("the get error is %v, and try next", err)
return false, nil
}
result = output
return true, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("cat not get %v without empty", parameters))
e2e.Logf("the result of queried resource:%v", result)
return result
} | hive | ||||
function | openshift/openshift-tests-private | daec8983-0c33-4152-b866-ddcf6528dbfa | doAction | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func doAction(oc *exutil.CLI, action string, asAdmin bool, withoutNamespace bool, parameters ...string) (string, error) {
if asAdmin && withoutNamespace {
return oc.AsAdmin().WithoutNamespace().Run(action).Args(parameters...).Output()
}
if asAdmin && !withoutNamespace {
return oc.AsAdmin().Run(action).Args(parameters...).Output()
}
if !asAdmin && withoutNamespace {
return oc.WithoutNamespace().Run(action).Args(parameters...).Output()
}
if !asAdmin && !withoutNamespace {
return oc.Run(action).Args(parameters...).Output()
}
return "", nil
} | hive | |||||
function | openshift/openshift-tests-private | 72d22b0e-1d68-47ba-9903-c96fda8c9db5 | newCheck | ['checkDescription'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func newCheck(method string, action string, executor bool, inlineNamespace bool, expectAction bool,
expectContent string, expect bool, timeout int, resource []string) checkDescription {
return checkDescription{
method: method,
action: action,
executor: executor,
inlineNamespace: inlineNamespace,
expectAction: expectAction,
expectContent: expectContent,
expect: expect,
timeout: timeout,
resource: resource,
}
} | hive | ||||
function | openshift/openshift-tests-private | 6e111dec-b01b-4426-81b1-ccbfbd9042bb | check | ['"fmt"'] | ['checkDescription'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func (ck checkDescription) check(oc *exutil.CLI) {
switch ck.method {
case "present":
ok := isPresentResource(oc, ck.action, ck.executor, ck.inlineNamespace, ck.expectAction, ck.resource...)
o.Expect(ok).To(o.BeTrue())
case "expect":
err := expectedResource(oc, ck.action, ck.executor, ck.inlineNamespace, ck.expectAction, ck.expectContent, ck.expect, ck.timeout, ck.resource...)
exutil.AssertWaitPollNoErr(err, "can not get expected result")
default:
err := fmt.Errorf("unknown method")
o.Expect(err).NotTo(o.HaveOccurred())
}
} | hive | |||
function | openshift/openshift-tests-private | 8faf2420-b5c4-4ea2-a381-f51f7fa4b1cd | isPresentResource | ['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func isPresentResource(oc *exutil.CLI, action string, asAdmin bool, withoutNamespace bool, present bool, parameters ...string) bool {
parameters = append(parameters, "--ignore-not-found")
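// Poll until the resource presence matches the expectation: when present is true the output must
// be non-empty, otherwise it must be empty.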
err := wait.Poll(3*time.Second, 60*time.Second, func() (bool, error) {
output, err := doAction(oc, action, asAdmin, withoutNamespace, parameters...)
if err != nil {
e2e.Logf("the get error is %v, and try next", err)
return false, nil
}
if !present && strings.Compare(output, "") == 0 {
return true, nil
}
if present && strings.Compare(output, "") != 0 {
return true, nil
}
return false, nil
})
return err == nil
} | hive | ||||
function | openshift/openshift-tests-private | a8c13807-5eb1-43bd-bd25-f294dce45bf8 | expectedResource | ['"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func expectedResource(oc *exutil.CLI, action string, asAdmin bool, withoutNamespace bool, isCompare bool, content string, expect bool, timeout int, parameters ...string) error {
cc := func(a, b string, ic bool) bool {
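// b may hold several alternatives separated by "+2+"; return true when a equals (ic=true)
// or contains (ic=false) any one of them.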
bs := strings.Split(b, "+2+")
ret := false
for _, s := range bs {
if (ic && strings.Compare(a, s) == 0) || (!ic && strings.Contains(a, s)) {
ret = true
}
}
return ret
}
var interval, inputTimeout time.Duration
if timeout >= ClusterInstallTimeout {
inputTimeout = time.Duration(timeout/60) * time.Minute
interval = 3 * time.Minute
} else {
inputTimeout = time.Duration(timeout) * time.Second
interval = time.Duration(timeout/60) * time.Second
}
return wait.Poll(interval, inputTimeout, func() (bool, error) {
output, err := doAction(oc, action, asAdmin, withoutNamespace, parameters...)
if err != nil {
e2e.Logf("the get error is %v, and try next", err)
return false, nil
}
e2e.Logf("the queried resource:%s", output)
if isCompare && expect && cc(output, content, isCompare) {
e2e.Logf("the output %s matches one of the content %s, expected", output, content)
return true, nil
}
if isCompare && !expect && !cc(output, content, isCompare) {
e2e.Logf("the output %s does not match the content %s, expected", output, content)
return true, nil
}
if !isCompare && expect && cc(output, content, isCompare) {
e2e.Logf("the output %s contains one of the content %s, expected", output, content)
return true, nil
}
if !isCompare && !expect && !cc(output, content, isCompare) {
e2e.Logf("the output %s does not contain the content %s, expected", output, content)
return true, nil
}
return false, nil
})
} | hive | ||||
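// cleanupObjects deletes the given objects; for a ClusterPool or ClusterDeployment whose
// installation failed it first dumps status conditions and provision pod logs, then waits for
// the ClusterDeployment deletion to complete.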
function | openshift/openshift-tests-private | f90ef271-4033-467f-ba42-c909fcb73b64 | cleanupObjects | ['"strings"', '"k8s.io/apimachinery/pkg/util/wait"'] | ['objectTableRef'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func cleanupObjects(oc *exutil.CLI, objs ...objectTableRef) {
for _, v := range objs {
e2e.Logf("Start to remove: %v", v)
//Print out debugging info if CD installed is false
var provisionPodOutput, installedFlag string
if v.kind == "ClusterPool" {
if v.namespace != "" {
cdListStr := getCDlistfromPool(oc, v.name)
var cdArray []string
cdArray = strings.Split(strings.TrimSpace(cdListStr), "\n")
for i := range cdArray {
installedFlag, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("ClusterDeployment", "-n", cdArray[i], cdArray[i], "-o=jsonpath={.spec.installed}").Output()
if installedFlag == "false" {
failedCdName := cdArray[i]
e2e.Logf("failedCdName is %s", failedCdName)
//At present, the maximum size of clusterpool in auto test is 2, we can print them all to get more information if cd installed is false
printStatusConditions(oc, "ClusterDeployment", failedCdName, failedCdName)
printProvisionPodLogs(oc, provisionPodOutput, failedCdName)
}
}
}
} else if v.kind == "ClusterDeployment" {
installedFlag, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args(v.kind, "-n", v.namespace, v.name, "-o=jsonpath={.spec.installed}").Output()
if installedFlag == "false" {
printStatusConditions(oc, v.kind, v.namespace, v.name)
printProvisionPodLogs(oc, provisionPodOutput, v.namespace)
}
}
if v.namespace != "" {
oc.AsAdmin().WithoutNamespace().Run("delete").Args(v.kind, "-n", v.namespace, v.name, "--ignore-not-found").Output()
} else {
oc.AsAdmin().WithoutNamespace().Run("delete").Args(v.kind, v.name, "--ignore-not-found").Output()
}
//For ClusterPool or ClusterDeployment, need to wait ClusterDeployment delete done
if v.kind == "ClusterPool" || v.kind == "ClusterDeployment" {
e2e.Logf("Wait ClusterDeployment delete done for %s", v.name)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, v.name, nok, ClusterUninstallTimeout, []string{"ClusterDeployment", "-A"}).check(oc)
}
}
} | hive | |||
function | openshift/openshift-tests-private | 8995fa1e-303e-43ab-ac2e-66837709bd64 | printStatusConditions | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func printStatusConditions(oc *exutil.CLI, kind, namespace, name string) {
statusConditions, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(kind, "-n", namespace, name, "-o=jsonpath={.status.conditions}").Output()
if len(statusConditions) <= LogsLimitLen {
e2e.Logf("statusConditions is %s", statusConditions)
} else {
e2e.Logf("statusConditions is %s", statusConditions[:LogsLimitLen])
}
} | hive | |||||
function | openshift/openshift-tests-private | 8c5092ef-a68c-4c00-9bd8-5d04ddd741c4 | printProvisionPodLogs | ['"os"', '"os/exec"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func printProvisionPodLogs(oc *exutil.CLI, provisionPodOutput, namespace string) {
provisionPodOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-l", "hive.openshift.io/job-type=provision", "-n", namespace, "-o=jsonpath={.items[*].metadata.name}").Output()
e2e.Logf("provisionPodOutput is %s", provisionPodOutput)
//if err == nil, print out provision pod logs
if err == nil && len(strings.TrimSpace(provisionPodOutput)) > 0 {
var provisionPod []string
provisionPod = strings.Split(strings.TrimSpace(provisionPodOutput), " ")
e2e.Logf("provisionPod is %s", provisionPod)
if len(provisionPod) > 0 {
e2e.Logf("provisionPod len is %d. provisionPod[0] is %s", len(provisionPod), provisionPod[0])
provisionPodLogsFile := "logs_output_" + getRandomString()[:ClusterSuffixLen] + ".txt"
provisionPodLogs, _ := oc.AsAdmin().WithoutNamespace().Run("logs").Args(provisionPod[0], "-c", "hive", "-n", namespace).OutputToFile(provisionPodLogsFile)
defer os.Remove(provisionPodLogs)
failLogs, _ := exec.Command("bash", "-c", "grep -E 'level=error|level=fatal' "+provisionPodLogs).Output()
if len(failLogs) <= LogsLimitLen {
e2e.Logf("provisionPodLogs is %s", failLogs)
} else {
e2e.Logf("provisionPodLogs is %s", failLogs[len(failLogs)-LogsLimitLen:])
}
}
}
} | hive | ||||
function | openshift/openshift-tests-private | fd6569d1-8971-48eb-af94-193dc36300fe | ContainsInStringSlice | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func ContainsInStringSlice(items []string, item string) bool {
for _, eachItem := range items {
if eachItem == item {
return true
}
}
return false
} | hive | |||||
function | openshift/openshift-tests-private | bd9ce690-a193-4ece-ad27-16c5eb0989b6 | getInfraIDFromCDName | ['"strings"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func getInfraIDFromCDName(oc *exutil.CLI, cdName string) string {
var (
infraID string
err error
)
getInfraIDFromCD := func() bool {
infraID, _, err = oc.AsAdmin().Run("get").Args("cd", cdName, "-o=jsonpath={.spec.clusterMetadata.infraID}").Outputs()
return err == nil && strings.HasPrefix(infraID, cdName)
}
o.Eventually(getInfraIDFromCD).WithTimeout(10 * time.Minute).WithPolling(5 * time.Second).Should(o.BeTrue())
e2e.Logf("Found infraID = %v", infraID)
return infraID
} | hive | ||||
function | openshift/openshift-tests-private | 9124de48-7776-4da6-a51f-bcb78cc974dc | getClusterprovisionName | ['"strings"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func getClusterprovisionName(oc *exutil.CLI, cdName, namespace string) string {
var ClusterprovisionName string
var err error
waitForClusterprovision := func() bool {
ClusterprovisionName, _, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("ClusterDeployment", cdName, "-n", namespace, "-o=jsonpath={.status.provisionRef.name}").Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
return strings.Contains(ClusterprovisionName, cdName)
}
o.Eventually(waitForClusterprovision).WithTimeout(DefaultTimeout * time.Second).WithPolling(3 * time.Second).Should(o.BeTrue())
return ClusterprovisionName
} | hive | ||||
function | openshift/openshift-tests-private | 49ea77c4-18c8-4f53-acd8-1bb3f8c43b42 | getProvisionPodNames | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func getProvisionPodNames(oc *exutil.CLI, cdName, namespace string) (provisionPodNames []string) {
// For "kubectl get", the default sorting order is alphabetical
stdout, _, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-l", "hive.openshift.io/job-type=provision", "-l", "hive.openshift.io/cluster-deployment-name="+cdName, "-n", namespace, "-o=jsonpath={.items[*].metadata.name}").Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
for _, provisionPodName := range strings.Split(stdout, " ") {
o.Expect(provisionPodName).To(o.ContainSubstring("provision"))
o.Expect(provisionPodName).To(o.ContainSubstring(cdName))
provisionPodNames = append(provisionPodNames, provisionPodName)
}
return
} | hive | ||||
function | openshift/openshift-tests-private | c48541e5-87a7-4b2c-951a-34cf0c4e4033 | getDeprovisionPodName | ['"strings"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func getDeprovisionPodName(oc *exutil.CLI, cdName, namespace string) string {
var DeprovisionPodName string
var err error
waitForDeprovisionPod := func() bool {
DeprovisionPodName, _, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-l", "hive.openshift.io/job-type=deprovision", "-l", "hive.openshift.io/cluster-deployment-name="+cdName, "-n", namespace, "-o=jsonpath={.items[0].metadata.name}").Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
return strings.Contains(DeprovisionPodName, cdName) && strings.Contains(DeprovisionPodName, "uninstall")
}
o.Eventually(waitForDeprovisionPod).WithTimeout(DefaultTimeout * time.Second).WithPolling(3 * time.Second).Should(o.BeTrue())
return DeprovisionPodName
} | hive | ||||
function | openshift/openshift-tests-private | 77e9e5a7-9256-43e8-ad44-e5ebd70d939b | assertLogs | ['"bufio"', '"os"', '"strings"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func assertLogs(logStream *os.File, targetLines []string, lineTransformation func(line string) string, timeout time.Duration) bool {
// Set timeout (applies to future AND currently-blocked Read calls)
endTime := time.Now().Add(timeout)
err := logStream.SetReadDeadline(endTime)
o.Expect(err).NotTo(o.HaveOccurred())
// Default line transformation: the identity function
if lineTransformation == nil {
e2e.Logf("Using default line transformation (the identity function)")
lineTransformation = func(line string) string { return line }
}
// Line scanning
scanner := bufio.NewScanner(logStream)
targetIdx := 0
// In case of timeout, current & subsequent Read calls error out, resulting in scanner.Scan() returning false immediately
for scanner.Scan() {
switch transformedLine, targetLine := lineTransformation(scanner.Text()), targetLines[targetIdx]; {
// We have a match, proceed to the next target line
case targetIdx == 0 && strings.HasSuffix(transformedLine, targetLine) ||
targetIdx == len(targetLines)-1 && strings.HasPrefix(transformedLine, targetLine) ||
transformedLine == targetLine:
if targetIdx++; targetIdx == len(targetLines) {
e2e.Logf("Found substring [%v] in the logs", strings.Join(targetLines, "\n"))
return true
}
// Restart from target line 0
default:
targetIdx = 0
}
}
return false
} | hive | ||||
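A hedged sketch of driving assertLogs (the followed workload and the target log line are assumptions, not taken from the repo): an os.Pipe read end supports read deadlines, so a `oc logs -f` stream can be bounded in time and optionally normalized with a line transformation such as strings.TrimSpace.
// Sketch only: follow the hive-controllers logs through a pipe and wait for a hypothetical line.
r, w, err := os.Pipe()
o.Expect(err).NotTo(o.HaveOccurred())
defer r.Close()
cmd := exec.Command("oc", "logs", "-f", "-n", HiveNamespace, "deployment/hive-controllers")
cmd.Stdout = w
o.Expect(cmd.Start()).NotTo(o.HaveOccurred())
w.Close() // the child process keeps its own copy of the write end
defer cmd.Process.Kill()
found := assertLogs(r, []string{"reconcile complete"}, strings.TrimSpace, 3*time.Minute)
o.Expect(found).To(o.BeTrue())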
function | openshift/openshift-tests-private | b58cad01-ae66-4722-8811-7fcc5f86cfac | removeResource | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func removeResource(oc *exutil.CLI, parameters ...string) {
output, err := oc.AsAdmin().WithoutNamespace().Run("delete").Args(parameters...).Output()
if err != nil && (strings.Contains(output, "NotFound") || strings.Contains(output, "No resources found")) {
e2e.Logf("No resource found!")
return
}
o.Expect(err).NotTo(o.HaveOccurred())
} | hive | ||||
function | openshift/openshift-tests-private | 08182bb6-e89a-4182-83ee-7eb4d5738f51 | delete | ['hiveconfig'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func (hc *hiveconfig) delete(oc *exutil.CLI) {
removeResource(oc, "hiveconfig", "hive")
} | hive | ||||
function | openshift/openshift-tests-private | 0a349588-9f9d-40f7-b5cb-23f74c787ebc | createPullSecret | ['"os"', '"github.com/aws/aws-sdk-go-v2/config"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func createPullSecret(oc *exutil.CLI, namespace string) {
dirname := "/tmp/" + oc.Namespace() + "-pull"
err := os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
defer os.RemoveAll(dirname)
err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/pull-secret", "-n", "openshift-config", "--to="+dirname, "--confirm").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.Run("create").Args("secret", "generic", "pull-secret", "--from-file="+dirname+"/.dockerconfigjson", "-n", namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
} | hive | ||||
function | openshift/openshift-tests-private | bac859a2-366c-498c-81c7-f0486c2dc134 | createAWSCreds | ['"os"', '"github.com/aws/aws-sdk-go-v2/aws"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func createAWSCreds(oc *exutil.CLI, namespace string) {
dirname := "/tmp/" + oc.Namespace() + "-creds"
err := os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
defer os.RemoveAll(dirname)
err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/aws-creds", "-n", "kube-system", "--to="+dirname, "--confirm").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.Run("create").Args("secret", "generic", "aws-creds", "--from-file="+dirname+"/aws_access_key_id", "--from-file="+dirname+"/aws_secret_access_key", "-n", namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
} | hive | ||||
function | openshift/openshift-tests-private | db484df1-d63d-44b6-b8d2-cba1da6f1ebb | createRoute53AWSCreds | ['"os"', '"strings"', '"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/aws/aws-sdk-go-v2/service/route53"', 'legoroute53 "github.com/go-acme/lego/v4/providers/dns/route53"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func createRoute53AWSCreds(oc *exutil.CLI, namespace string) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret", "route53-aws-creds", "-n", HiveNamespace).Output()
if strings.Contains(output, "NotFound") || err != nil {
e2e.Logf("No route53-aws-creds, Create it.")
dirname := "/tmp/" + oc.Namespace() + "-route53-creds"
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
defer os.RemoveAll(dirname)
err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/aws-creds", "-n", "kube-system", "--to="+dirname, "--confirm").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", "route53-aws-creds", "--from-file="+dirname+"/aws_access_key_id", "--from-file="+dirname+"/aws_secret_access_key", "-n", HiveNamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
} else {
e2e.Logf("route53-aws-creds already exists.")
}
} | hive | ||||
function | openshift/openshift-tests-private | 57510a87-ee2a-436e-a753-14c1fde2812c | createAzureCreds | ['"bufio"', '"encoding/json"', '"fmt"', '"os"', '"github.com/aws/aws-sdk-go-v2/credentials"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func createAzureCreds(oc *exutil.CLI, namespace string) {
dirname := "/tmp/" + oc.Namespace() + "-creds"
err := os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
defer os.RemoveAll(dirname)
var azureClientID, azureClientSecret, azureSubscriptionID, azureTenantID string
azureClientID, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/azure-credentials", "-n", "kube-system", "--template='{{.data.azure_client_id | base64decode}}'").Output()
o.Expect(err).NotTo(o.HaveOccurred())
azureClientSecret, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/azure-credentials", "-n", "kube-system", "--template='{{.data.azure_client_secret | base64decode}}'").Output()
o.Expect(err).NotTo(o.HaveOccurred())
azureSubscriptionID, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/azure-credentials", "-n", "kube-system", "--template='{{.data.azure_subscription_id | base64decode}}'").Output()
o.Expect(err).NotTo(o.HaveOccurred())
azureTenantID, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/azure-credentials", "-n", "kube-system", "--template='{{.data.azure_tenant_id | base64decode}}'").Output()
o.Expect(err).NotTo(o.HaveOccurred())
//Convert credentials to osServicePrincipal.json format
output := fmt.Sprintf("{\"subscriptionId\":\"%s\",\"clientId\":\"%s\",\"clientSecret\":\"%s\",\"tenantId\":\"%s\"}", azureSubscriptionID[1:len(azureSubscriptionID)-1], azureClientID[1:len(azureClientID)-1], azureClientSecret[1:len(azureClientSecret)-1], azureTenantID[1:len(azureTenantID)-1])
outputFile, outputErr := os.OpenFile(dirname+"/osServicePrincipal.json", os.O_CREATE|os.O_WRONLY, 0666)
o.Expect(outputErr).NotTo(o.HaveOccurred())
defer outputFile.Close()
outputWriter := bufio.NewWriter(outputFile)
writeByte, writeError := outputWriter.WriteString(output)
o.Expect(writeError).NotTo(o.HaveOccurred())
writeError = outputWriter.Flush()
o.Expect(writeError).NotTo(o.HaveOccurred())
e2e.Logf("%d byte written to osServicePrincipal.json", writeByte)
err = oc.Run("create").Args("secret", "generic", AzureCreds, "--from-file="+dirname+"/osServicePrincipal.json", "-n", namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
} | hive | ||||
function | openshift/openshift-tests-private | 09a4dd62-b379-47d6-8e52-006514f0d1f0 | createGCPCreds | ['"encoding/json"', '"os"', '"github.com/aws/aws-sdk-go-v2/credentials"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func createGCPCreds(oc *exutil.CLI, namespace string) {
dirname := "/tmp/" + oc.Namespace() + "-creds"
err := os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
defer os.RemoveAll(dirname)
err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/gcp-credentials", "-n", "kube-system", "--to="+dirname, "--confirm").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.Run("create").Args("secret", "generic", GCPCreds, "--from-file=osServiceAccount.json="+dirname+"/service_account.json", "-n", namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
} | hive | ||||
function | openshift/openshift-tests-private | a60a6c0c-765d-43fc-bd1a-b65a8751a97a | createVSphereCreds | ['"context"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func createVSphereCreds(oc *exutil.CLI, namespace, vCenter string) {
username, password := getVSphereCredentials(oc, vCenter)
secret := &corev1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: corev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: VSphereCreds,
Namespace: namespace,
},
Type: corev1.SecretTypeOpaque,
StringData: map[string]string{
"username": username,
"password": password,
},
}
_, err := oc.AdminKubeClient().CoreV1().Secrets(namespace).Create(context.Background(), secret, metav1.CreateOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
} | hive | ||||
function | openshift/openshift-tests-private | 2f3ade48-8f4b-49bd-ba18-a891d65354d9 | extractRelFromImg | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func extractRelFromImg(image string) string {
index := strings.Index(image, ":")
if index != -1 {
tempStr := image[index+1:]
index = strings.Index(tempStr, "-")
if index != -1 {
e2e.Logf("Extracted OCP release: %s", tempStr[:index])
return tempStr[:index]
}
}
e2e.Logf("Failed to extract OCP release from Image.")
return ""
} | hive | ||||
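For illustration, the helper above reduces a release pull spec to its bare version; the image reference below is only an example, not a value used by the tests.
rel := extractRelFromImg("quay.io/openshift-release-dev/ocp-release:4.16.0-x86_64")
// rel == "4.16.0": the substring after the first ":" and before the following "-"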
function | openshift/openshift-tests-private | 0c325ec9-6bc2-44c2-a71f-97c8b6d834b8 | getCDlistfromPool | ['"os"', '"os/exec"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func getCDlistfromPool(oc *exutil.CLI, pool string) string {
fileName := "cd_output_" + getRandomString() + ".txt"
cdOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cd", "-A").OutputToFile(fileName)
o.Expect(err).NotTo(o.HaveOccurred())
defer os.Remove(cdOutput)
poolCdList, err := exec.Command("bash", "-c", "cat "+cdOutput+" | grep "+pool+" | awk '{print $1}'").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("CD list is %s for pool %s", poolCdList, pool)
return string(poolCdList)
} | hive | ||||
function | openshift/openshift-tests-private | 0ef323f8-7639-4ed6-a4e3-58231efbc3cb | getClusterKubeconfig | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func getClusterKubeconfig(oc *exutil.CLI, clustername, namespace, dir string) string {
kubeconfigsecretname, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cd", clustername, "-n", namespace, "-o=jsonpath={.spec.clusterMetadata.adminKubeconfigSecretRef.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Extract cluster %s kubeconfig to %s", clustername, dir)
err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/"+kubeconfigsecretname, "-n", namespace, "--to="+dir, "--confirm").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
kubeConfigPath := dir + "/kubeconfig"
return kubeConfigPath
} | hive | |||||
function | openshift/openshift-tests-private | 358b421a-788d-440d-98c1-f49f04a726ef | checkResourceNumber | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func checkResourceNumber(oc *exutil.CLI, filterName string, resource []string) int {
resourceOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(resource...).Output()
o.Expect(err).NotTo(o.HaveOccurred())
return strings.Count(resourceOutput, filterName)
} | hive | ||||
function | openshift/openshift-tests-private | 57649037-fdd6-4f47-971d-72f5770ce30a | getPullSecret | ['"github.com/aws/aws-sdk-go-v2/config"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func getPullSecret(oc *exutil.CLI) (string, error) {
return oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/pull-secret", "-n", "openshift-config", `--template={{index .data ".dockerconfigjson" | base64decode}}`).OutputToFile("auth.dockerconfigjson")
} | hive | ||||
function | openshift/openshift-tests-private | 0767ed47-8ac6-4606-8877-8f212b46cf64 | getCommitID | ['"os"', '"os/exec"', '"strings"', '"github.com/aws/aws-sdk-go-v2/config"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func getCommitID(oc *exutil.CLI, component string, clusterVersion string) (string, error) {
secretFile, secretErr := getPullSecret(oc)
defer os.Remove(secretFile)
if secretErr != nil {
return "", secretErr
}
outFilePath, ocErr := oc.AsAdmin().WithoutNamespace().Run("adm").Args("release", "info", "--registry-config="+secretFile, "--commits", clusterVersion, "--insecure=true").OutputToFile("commitIdLogs.txt")
defer os.Remove(outFilePath)
if ocErr != nil {
return "", ocErr
}
commitID, cmdErr := exec.Command("bash", "-c", "cat "+outFilePath+" | grep "+component+" | awk '{print $3}'").Output()
return strings.TrimSuffix(string(commitID), "\n"), cmdErr
} | hive | ||||
function | openshift/openshift-tests-private | fc7d94ef-4510-42ba-b74f-32ec8fdc0673 | getPullSpec | ['"os"', '"github.com/aws/aws-sdk-go-v2/config"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func getPullSpec(oc *exutil.CLI, component string, clusterVersion string) (string, error) {
secretFile, secretErr := getPullSecret(oc)
defer os.Remove(secretFile)
if secretErr != nil {
return "", secretErr
}
pullSpec, ocErr := oc.AsAdmin().WithoutNamespace().Run("adm").Args("release", "info", "--registry-config="+secretFile, "--image-for="+component, clusterVersion, "--insecure=true").Output()
if ocErr != nil {
return "", ocErr
}
return pullSpec, nil
} | hive | ||||
function | openshift/openshift-tests-private | 9aa0ba62-abad-4033-b38f-51f8ea31c6a7 | exposeMetrics | ['"fmt"', '"path/filepath"', '"strings"', '"github.com/aws/aws-sdk-go-v2/config"'] | ['clusterMonitoringConfig'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func exposeMetrics(oc *exutil.CLI, testDataDir string, needRecoverPtr *bool, prevConfigPtr *string) {
// Look for cluster-level monitoring configuration
getOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ConfigMap", "cluster-monitoring-config", "-n", "openshift-monitoring", "--ignore-not-found").Output()
o.Expect(err).NotTo(o.HaveOccurred())
// Enable user workload monitoring
if len(getOutput) > 0 {
e2e.Logf("ConfigMap cluster-monitoring-config exists, extracting cluster-monitoring-config ...")
extractOutput, _, _ := oc.AsAdmin().WithoutNamespace().Run("extract").Args("ConfigMap/cluster-monitoring-config", "-n", "openshift-monitoring", "--to=-").Outputs()
if strings.Contains(strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(extractOutput, "'", ""), "\"", ""), " ", ""), "enableUserWorkload:true") {
e2e.Logf("User workload is enabled, doing nothing ... ")
*needRecoverPtr, *prevConfigPtr = false, ""
} else {
e2e.Logf("User workload is not enabled, enabling ...")
*needRecoverPtr, *prevConfigPtr = true, extractOutput
extractOutputParts := strings.Split(extractOutput, "\n")
containKeyword := false
for idx, part := range extractOutputParts {
if strings.Contains(part, "enableUserWorkload") {
e2e.Logf("Keyword \"enableUserWorkload\" found in cluster-monitoring-config, setting enableUserWorkload to true ...")
extractOutputParts[idx] = "enableUserWorkload: true"
containKeyword = true
break
}
}
if !containKeyword {
e2e.Logf("Keyword \"enableUserWorkload\" not found in cluster-monitoring-config, adding ...")
extractOutputParts = append(extractOutputParts, "enableUserWorkload: true")
}
modifiedExtractOutput := strings.ReplaceAll(strings.Join(extractOutputParts, "\\n"), "\"", "\\\"")
e2e.Logf("Patching ConfigMap cluster-monitoring-config to enable user workload monitoring ...")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("ConfigMap", "cluster-monitoring-config", "-n", "openshift-monitoring", "--type", "merge", "-p", fmt.Sprintf("{\"data\":{\"config.yaml\": \"%s\"}}", modifiedExtractOutput)).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
} else {
e2e.Logf("ConfigMap cluster-monitoring-config does not exist, creating ...")
*needRecoverPtr, *prevConfigPtr = true, ""
clusterMonitoringConfigTemp := clusterMonitoringConfig{
enableUserWorkload: true,
namespace: "openshift-monitoring",
template: filepath.Join(testDataDir, "cluster-monitoring-config.yaml"),
}
clusterMonitoringConfigTemp.create(oc)
}
// Check monitoring-related pods are created in the openshift-user-workload-monitoring namespace
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "prometheus-operator", ok, DefaultTimeout, []string{"pod", "-n", "openshift-user-workload-monitoring"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "prometheus-user-workload", ok, DefaultTimeout, []string{"pod", "-n", "openshift-user-workload-monitoring"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "thanos-ruler-user-workload", ok, DefaultTimeout, []string{"pod", "-n", "openshift-user-workload-monitoring"}).check(oc)
// Check if ServiceMonitors and PodMonitors are created
e2e.Logf("Checking if ServiceMonitors and PodMonitors exist ...")
getOutput, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("ServiceMonitor", "hive-clustersync", "-n", HiveNamespace, "--ignore-not-found").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if len(getOutput) == 0 {
e2e.Logf("Creating PodMonitor for hive-operator ...")
podMonitorYaml := filepath.Join(testDataDir, "hive-operator-podmonitor.yaml")
err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", podMonitorYaml).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Creating ServiceMonitor for hive-controllers ...")
serviceMonitorControllers := filepath.Join(testDataDir, "hive-controllers-servicemonitor.yaml")
err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", serviceMonitorControllers).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("Creating ServiceMonitor for hive-clustersync ...")
serviceMonitorClustersync := filepath.Join(testDataDir, "hive-clustersync-servicemonitor.yaml")
err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", serviceMonitorClustersync).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
} | hive | |||
function | openshift/openshift-tests-private | 978a47d6-1bc0-4a35-b99b-88e8f907685d | recoverClusterMonitoring | ['"fmt"', '"strings"', '"github.com/aws/aws-sdk-go-v2/config"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func recoverClusterMonitoring(oc *exutil.CLI, needRecoverPtr *bool, prevConfigPtr *string) {
if *needRecoverPtr {
e2e.Logf("Recovering cluster monitoring configurations ...")
if len(*prevConfigPtr) == 0 {
e2e.Logf("ConfigMap/cluster-monitoring-config did not exist before calling exposeMetrics, deleting ...")
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("ConfigMap", "cluster-monitoring-config", "-n", "openshift-monitoring", "--ignore-not-found").Execute()
if err != nil {
e2e.Logf("Error occurred when deleting ConfigMap/cluster-monitoring-config: %v", err)
}
} else {
e2e.Logf("Reverting changes made to ConfigMap/cluster-monitoring-config ...")
*prevConfigPtr = strings.ReplaceAll(strings.ReplaceAll(*prevConfigPtr, "\n", "\\n"), "\"", "\\\"")
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("ConfigMap", "cluster-monitoring-config", "-n", "openshift-monitoring", "--type", "merge", "-p", fmt.Sprintf("{\"data\":{\"config.yaml\": \"%s\"}}", *prevConfigPtr)).Execute()
if err != nil {
e2e.Logf("Error occurred when patching ConfigMap/cluster-monitoring-config: %v", err)
}
}
e2e.Logf("Deleting ServiceMonitors and PodMonitors in the hive namespace ...")
err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("ServiceMonitor", "hive-clustersync", "-n", HiveNamespace, "--ignore-not-found").Execute()
if err != nil {
e2e.Logf("Error occurred when deleting ServiceMonitor/hive-clustersync: %v", err)
}
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("ServiceMonitor", "hive-controllers", "-n", HiveNamespace, "--ignore-not-found").Execute()
if err != nil {
e2e.Logf("Error occurred when deleting ServiceMonitor/hive-controllers: %v", err)
}
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("PodMonitor", "hive-operator", "-n", HiveNamespace, "--ignore-not-found").Execute()
if err != nil {
e2e.Logf("Error occurred when deleting PodMonitor/hive-operator: %v", err)
}
return
}
e2e.Logf("No recovering needed for cluster monitoring configurations. ")
} | hive | ||||
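A hedged sketch of how these two helpers pair up in a metrics test (the wiring is assumed, not copied from a specific spec): register the recovery first, then let exposeMetrics record whether anything actually needs to be rolled back.
var needRecover bool
var prevConfig string
defer recoverClusterMonitoring(oc, &needRecover, &prevConfig)
exposeMetrics(oc, testDataDir, &needRecover, &prevConfig)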
function | openshift/openshift-tests-private | ba833d63-edcc-42b8-ad98-27082b5631ee | exportMetric | ['"strings"'] | ['hiveconfig'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func exportMetric(oc *exutil.CLI, action bool) {
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("HiveConfig", "hive", "-o=jsonpath={.spec.exportMetrics}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
if action {
if strings.Contains(output, "true") {
e2e.Logf("The exportMetrics has been enabled in hiveconfig, won't change")
} else {
e2e.Logf("Enable hive exportMetric in Hiveconfig.")
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"HiveConfig", "hive", "--type", "merge", "-p", `{"spec":{"exportMetrics": true}}`}).check(oc)
hiveNS, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("Hiveconfig", "hive", "-o=jsonpath={.spec.targetNamespace}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(hiveNS).NotTo(o.BeEmpty())
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "prometheus-k8s", ok, DefaultTimeout, []string{"role", "-n", hiveNS}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "prometheus-k8s", ok, DefaultTimeout, []string{"rolebinding", "-n", hiveNS}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "hive-clustersync", ok, DefaultTimeout, []string{"servicemonitor", "-n", hiveNS, "-o=name"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "hive-controllers", ok, DefaultTimeout, []string{"servicemonitor", "-n", hiveNS, "-o=name"}).check(oc)
}
}
if !action {
if !strings.Contains(output, "true") {
e2e.Logf("The exportMetrics has been disabled in hiveconfig, won't change")
} else {
e2e.Logf("Disable hive exportMetric in Hiveconfig.")
newCheck("expect", "patch", asAdmin, withoutNamespace, contain, "patched", ok, DefaultTimeout, []string{"HiveConfig", "hive", "--type", "merge", "-p", `{"spec":{"exportMetrics": false}}`}).check(oc)
hiveNS, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("Hiveconfig", "hive", "-o=jsonpath={.spec.targetNamespace}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(hiveNS).NotTo(o.BeEmpty())
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "prometheus-k8s", nok, DefaultTimeout, []string{"role", "-n", hiveNS}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "prometheus-k8s", nok, DefaultTimeout, []string{"rolebinding", "-n", hiveNS}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "hive-clustersync", nok, DefaultTimeout, []string{"servicemonitor", "-n", hiveNS, "-o=name"}).check(oc)
newCheck("expect", "get", asAdmin, withoutNamespace, contain, "hive-controllers", nok, DefaultTimeout, []string{"servicemonitor", "-n", hiveNS, "-o=name"}).check(oc)
}
}
} | hive | |||
function | openshift/openshift-tests-private | ad59bb99-1730-4c77-8c51-341bfa8ca737 | doPrometheusQuery | ['"encoding/json"', '"fmt"', '"os/exec"'] | ['prometheusQueryResult'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func doPrometheusQuery(oc *exutil.CLI, token string, url string, query string) prometheusQueryResult {
var data prometheusQueryResult
msg, _, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args(
"-n", "openshift-monitoring", "-c", "prometheus", "prometheus-k8s-0", "-i", "--",
"curl", "-k", "-H", fmt.Sprintf("Authorization: Bearer %v", token),
fmt.Sprintf("%s%s", url, query)).Outputs()
if err != nil {
e2e.Failf("Failed Prometheus query, error: %v", err)
}
o.Expect(msg).NotTo(o.BeEmpty())
json.Unmarshal([]byte(msg), &data)
return data
} | hive | |||
function | openshift/openshift-tests-private | 05cdd7be-56e9-480d-a28d-9aa8a6f39b20 | checkMetricExist | ['"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func checkMetricExist(oc *exutil.CLI, expect bool, token string, url string, query []string) {
for _, v := range query {
e2e.Logf("Check metric %s", v)
err := wait.Poll(1*time.Minute, (ClusterResumeTimeout/60)*time.Minute, func() (bool, error) {
data := doPrometheusQuery(oc, token, url, v)
if expect && len(data.Data.Result) > 0 {
e2e.Logf("Metric %s exist, expected", v)
return true, nil
}
if !expect && len(data.Data.Result) == 0 {
e2e.Logf("Metric %s doesn't exist, expected", v)
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "\"checkMetricExist\" failed, cannot get expected result")
}
} | hive | ||||
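A hedged sketch of feeding these query helpers (the token retrieval and the querier URL are assumptions, not confirmed from the repo); the metric name comes from the cases handled in checkResourcesMetricValue below.
// Sketch only: mint a token for the prometheus-k8s service account and query the
// in-cluster querier endpoint that doPrometheusQuery curls from inside prometheus-k8s-0.
token, _, err := oc.AsAdmin().WithoutNamespace().Run("create").Args("token", "prometheus-k8s", "-n", "openshift-monitoring").Outputs()
o.Expect(err).NotTo(o.HaveOccurred())
queryURL := "https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query?query=" // assumed endpoint
checkMetricExist(oc, true, token, queryURL, []string{"hive_clusterclaim_assignment_delay_seconds_count"})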
function | openshift/openshift-tests-private | e83a37b2-bb6a-4c7c-8354-b65c357a8210 | checkResourcesMetricValue | ['"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func checkResourcesMetricValue(oc *exutil.CLI, resourceName, resourceNamespace string, expectedResult string, token string, url string, query string) {
err := wait.Poll(1*time.Minute, (ClusterResumeTimeout/60)*time.Minute, func() (bool, error) {
data := doPrometheusQuery(oc, token, url, query)
for _, v := range data.Data.Result {
switch query {
case "hive_clusterclaim_assignment_delay_seconds_count", "hive_clusterpool_stale_clusterdeployments_deleted":
if v.Metric.ClusterpoolName == resourceName && v.Metric.ClusterpoolNamespace == resourceNamespace {
e2e.Logf("Found metric for pool %s in namespace %s", resourceName, resourceNamespace)
if v.Value[1].(string) == expectedResult {
e2e.Logf("The metric Value %s matches expected %s", v.Value[1].(string), expectedResult)
return true, nil
}
e2e.Logf("The metric Value %s didn't match expected %s, try next round", v.Value[1].(string), expectedResult)
return false, nil
}
case "hive_cluster_deployment_provision_underway_install_restarts":
if v.Metric.ClusterDeployment == resourceName && v.Metric.ExportedNamespace == resourceNamespace {
e2e.Logf("Found metric for ClusterDeployment %s in namespace %s", resourceName, resourceNamespace)
if v.Value[1].(string) == expectedResult {
e2e.Logf("The metric Value %s matches expected %s", v.Value[1].(string), expectedResult)
return true, nil
}
e2e.Logf("The metric Value %s didn't match expected %s, try next round", v.Value[1].(string), expectedResult)
return false, nil
}
case "hive_cluster_deployment_install_success_total_count":
if v.Metric.Region == resourceName && v.Metric.Namespace == resourceNamespace {
if data.Data.Result[0].Metric.InstallAttempt == expectedResult {
e2e.Logf("The region %s has %s install attempts", v.Metric.Region, data.Data.Result[0].Metric.InstallAttempt)
return true, nil
}
e2e.Logf("The metric InstallAttempt label %s didn't match expected %s, try next round", data.Data.Result[0].Metric.InstallAttempt, expectedResult)
return false, nil
}
case "hive_cluster_deployment_install_failure_total_count":
if v.Metric.Region == resourceName && v.Metric.Namespace == resourceNamespace {
if data.Data.Result[2].Metric.InstallAttempt == expectedResult {
e2e.Logf("The region %s has %s install attempts", v.Metric.Region, data.Data.Result[2].Metric.InstallAttempt)
return true, nil
}
e2e.Logf("The metric InstallAttempt label %s didn't match expected %s, try next round", data.Data.Result[2].Metric.InstallAttempt, expectedResult)
return false, nil
}
}
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "\"checkResourcesMetricValue\" failed, cannot get expected result")
} | hive | ||||
function | openshift/openshift-tests-private | 5f8ceec5-9860-492d-a0b8-5812859e79f4 | checkHiveConfigMetric | ['"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func checkHiveConfigMetric(oc *exutil.CLI, field string, expectedResult string, token string, url string, query string) {
err := wait.Poll(1*time.Minute, (ClusterResumeTimeout/60)*time.Minute, func() (bool, error) {
data := doPrometheusQuery(oc, token, url, query)
switch field {
case "condition":
if data.Data.Result[0].Metric.Condition == expectedResult {
e2e.Logf("the Metric %s field \"%s\" matched the expected result \"%s\"", query, field, expectedResult)
return true, nil
}
case "reason":
if data.Data.Result[0].Metric.Reason == expectedResult {
e2e.Logf("the Metric %s field \"%s\" matched the expected result \"%s\"", query, field, expectedResult)
return true, nil
}
default:
e2e.Logf("the Metric %s doesn't contain field %s", query, field)
return false, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "\"checkHiveConfigMetric\" failed, cannot get expected result")
} | hive | ||||
function | openshift/openshift-tests-private | eb7ce95d-fa7e-4d72-bd1c-75ed5b4def30 | createCD | ['"path/filepath"', '"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/aws/aws-sdk-go-v2/config"', '"github.com/aws/aws-sdk-go-v2/credentials"'] | ['clusterImageSet', 'installConfig', 'clusterDeployment', 'azureInstallConfig', 'azureClusterDeployment', 'gcpInstallConfig', 'gcpClusterDeployment', 'vSphereInstallConfig', 'vSphereClusterDeployment'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func createCD(testDataDir string, testOCPImage string, oc *exutil.CLI, ns string, installConfigSecret interface{}, cd interface{}) {
switch x := cd.(type) {
case clusterDeployment:
exutil.By("Create AWS ClusterDeployment..." + ns)
imageSet := clusterImageSet{
name: x.name + "-imageset",
releaseImage: testOCPImage,
template: filepath.Join(testDataDir, "clusterimageset.yaml"),
}
exutil.By("Create ClusterImageSet...")
imageSet.create(oc)
//secrets can be accessed by pod in the same namespace, so copy pull-secret and aws-creds to target namespace for the pool
exutil.By("Copy AWS platform credentials...")
createAWSCreds(oc, ns)
exutil.By("Copy pull-secret...")
createPullSecret(oc, ns)
exutil.By("Create AWS Install-Config Secret...")
switch ic := installConfigSecret.(type) {
case installConfig:
ic.create(oc)
default:
e2e.Failf("Incorrect install-config type")
}
x.create(oc)
case gcpClusterDeployment:
exutil.By("Create gcp ClusterDeployment..." + ns)
imageSet := clusterImageSet{
name: x.name + "-imageset",
releaseImage: testOCPImage,
template: filepath.Join(testDataDir, "clusterimageset.yaml"),
}
exutil.By("Create ClusterImageSet...")
imageSet.create(oc)
//secrets can be accessed by pod in the same namespace, so copy pull-secret and aws-creds to target namespace for the pool
exutil.By("Copy GCP platform credentials...")
createGCPCreds(oc, ns)
exutil.By("Copy pull-secret...")
createPullSecret(oc, ns)
exutil.By("Create GCP Install-Config Secret...")
switch ic := installConfigSecret.(type) {
case gcpInstallConfig:
ic.create(oc)
default:
e2e.Failf("Incorrect install-config type")
}
x.create(oc)
case azureClusterDeployment:
exutil.By("Create azure ClusterDeployment..." + ns)
imageSet := clusterImageSet{
name: x.name + "-imageset",
releaseImage: testOCPImage,
template: filepath.Join(testDataDir, "clusterimageset.yaml"),
}
exutil.By("Create ClusterImageSet...")
imageSet.create(oc)
//secrets can be accessed by pod in the same namespace, so copy pull-secret and aws-creds to target namespace for the pool
exutil.By("Copy Azure platform credentials...")
createAzureCreds(oc, ns)
exutil.By("Copy pull-secret...")
createPullSecret(oc, ns)
exutil.By("Create Azure Install-Config Secret...")
switch ic := installConfigSecret.(type) {
case azureInstallConfig:
ic.create(oc)
default:
e2e.Failf("Incorrect install-config type")
}
x.create(oc)
case vSphereClusterDeployment:
exutil.By("Creating vSphere ClusterDeployment in namespace: " + ns)
imageSet := clusterImageSet{
name: x.name + "-imageset",
releaseImage: testOCPImage,
template: filepath.Join(testDataDir, "clusterimageset.yaml"),
}
exutil.By("Creating ClusterImageSet")
imageSet.create(oc)
exutil.By("Copying vSphere platform credentials")
createVSphereCreds(oc, ns, x.vCenter)
exutil.By("Copying pull-secret")
createPullSecret(oc, ns)
exutil.By("Creating vCenter certificates Secret")
createVsphereCertsSecret(oc, ns, x.vCenter)
exutil.By("Creating vSphere Install-Config Secret")
switch ic := installConfigSecret.(type) {
case vSphereInstallConfig:
ic.create(oc)
default:
e2e.Failf("Incorrect install-config type")
}
x.create(oc)
default:
exutil.By("Unknown ClusterDeployment type")
}
} | hive | |||
function | openshift/openshift-tests-private | a9e3b042-cbfd-4a45-98bf-c97ab82422d1 | cleanCD | ['objectTableRef'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func cleanCD(oc *exutil.CLI, clusterImageSetName string, ns string, secretName string, cdName string) {
defer cleanupObjects(oc, objectTableRef{"ClusterImageSet", "", clusterImageSetName})
defer cleanupObjects(oc, objectTableRef{"Secret", ns, secretName})
defer cleanupObjects(oc, objectTableRef{"ClusterDeployment", ns, cdName})
} | hive | ||||
function | openshift/openshift-tests-private | 90f7a648-b4b2-427c-8e8d-d7cfab1c281a | installHiveOperator | ['"path/filepath"'] | ['hiveNameSpace', 'operatorGroup', 'subscription', 'hiveconfig'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func installHiveOperator(oc *exutil.CLI, ns *hiveNameSpace, og *operatorGroup, sub *subscription, hc *hiveconfig, testDataDir string) (string, error) {
nsTemp := filepath.Join(testDataDir, "namespace.yaml")
ogTemp := filepath.Join(testDataDir, "operatorgroup.yaml")
subTemp := filepath.Join(testDataDir, "subscription.yaml")
hcTemp := filepath.Join(testDataDir, "hiveconfig.yaml")
*ns = hiveNameSpace{
name: HiveNamespace,
template: nsTemp,
}
*og = operatorGroup{
name: "hive-og",
namespace: HiveNamespace,
template: ogTemp,
}
*sub = subscription{
name: "hive-sub",
namespace: HiveNamespace,
channel: "alpha",
approval: "Automatic",
operatorName: "hive-operator",
sourceName: "community-operators",
sourceNamespace: "openshift-marketplace",
startingCSV: "",
currentCSV: "",
installedCSV: "",
template: subTemp,
}
*hc = hiveconfig{
logLevel: "debug",
targetNamespace: HiveNamespace,
template: hcTemp,
}
// Create Hive Resources if not exist
ns.createIfNotExist(oc)
og.createIfNotExist(oc)
sub.createIfNotExist(oc)
hc.createIfNotExist(oc)
return "success", nil
} | hive | |||
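A hedged sketch of a typical call site for installHiveOperator (the surrounding variable wiring is assumed): the caller owns the four structs so later specs in the suite can reuse them.
var (
	ns  hiveNameSpace
	og  operatorGroup
	sub subscription
	hc  hiveconfig
)
_, err := installHiveOperator(oc, &ns, &og, &sub, &hc, testDataDir)
o.Expect(err).NotTo(o.HaveOccurred())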
function | openshift/openshift-tests-private | 74e3bc2c-c28a-4561-be2a-c7c03b79aa73 | getHiveadmissionPod | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func getHiveadmissionPod(oc *exutil.CLI, namespace string) string {
hiveadmissionOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "--selector=app=hiveadmission", "-n", namespace, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
podArray := strings.Split(strings.TrimSpace(hiveadmissionOutput), " ")
o.Expect(len(podArray)).To(o.BeNumerically(">", 0))
e2e.Logf("Hiveadmission pod list is %s,first pod name is %s", podArray, podArray[0])
return podArray[0]
} | hive | ||||
function | openshift/openshift-tests-private | a3eac100-88d2-4043-b61a-817768ef4b84 | getHivecontrollersPod | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/cluster_operator/hive/hive_util.go | func getHivecontrollersPod(oc *exutil.CLI, namespace string) string {
hivecontrollersOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "--selector=control-plane=controller-manager", "-n", namespace, "-o=jsonpath={.items[*].metadata.name}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
podArray := strings.Split(strings.TrimSpace(hivecontrollersOutput), " ")
o.Expect(len(podArray)).To(o.BeNumerically(">", 0))
e2e.Logf("Hivecontrollers pod list is %s,first pod name is %s", podArray, podArray[0])
return podArray[0]
} | hive |